repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
Laufire/ec
ec/modules/helpers.py
getTypeStr
def getTypeStr(_type):
    r"""Gets the string representation of the given type.

    CustomType instances render through str(); anything exposing a
    __name__ attribute contributes that name; everything else yields ''.
    """
    if isinstance(_type, CustomType):
        return str(_type)

    return getattr(_type, '__name__', '')
python
def getTypeStr(_type): r"""Gets the string representation of the given type. """ if isinstance(_type, CustomType): return str(_type) if hasattr(_type, '__name__'): return _type.__name__ return ''
[ "def", "getTypeStr", "(", "_type", ")", ":", "if", "isinstance", "(", "_type", ",", "CustomType", ")", ":", "return", "str", "(", "_type", ")", "if", "hasattr", "(", "_type", ",", "'__name__'", ")", ":", "return", "_type", ".", "__name__", "return", "'...
r"""Gets the string representation of the given type.
[ "r", "Gets", "the", "string", "representation", "of", "the", "given", "type", "." ]
train
https://github.com/Laufire/ec/blob/63e84a1daef9234487d7de538e5da233a7d13071/ec/modules/helpers.py#L153-L162
Laufire/ec
ec/modules/helpers.py
reconfigArg
def reconfigArg(ArgConfig):
    r"""Reconfigures an argument based on its configuration.

    Lets a CustomType rewrite the config through its __ec_config__ hook,
    then fills in defaults for 'type_str' and 'desc'.

    ArgConfig: a dict carrying at least 'name'; may also carry 'type',
    'type_str' and 'desc'. Mutated in place and returned.
    """
    _type = ArgConfig.get('type')

    if _type and hasattr(_type, '__ec_config__'):
        # pass the ArgConfig to the CustomType:
        _type.__ec_config__(ArgConfig)

    # FIX: idiomatic membership tests ('x' not in d) instead of `not 'x' in d`.
    if 'type_str' not in ArgConfig:
        ArgConfig['type_str'] = (_type.__name__ if isinstance(_type, type) else 'unspecified type') if _type else 'str'

    if 'desc' not in ArgConfig:
        ArgConfig['desc'] = ArgConfig['name']

    return ArgConfig
python
def reconfigArg(ArgConfig): r"""Reconfigures an argument based on its configuration. """ _type = ArgConfig.get('type') if _type: if hasattr(_type, '__ec_config__'): # pass the ArgConfig to the CustomType: _type.__ec_config__(ArgConfig) if not 'type_str' in ArgConfig: ArgConfig['type_str'] = (_type.__name__ if isinstance(_type, type) else 'unspecified type') if _type else 'str' if not 'desc' in ArgConfig: ArgConfig['desc'] = ArgConfig['name'] return ArgConfig
[ "def", "reconfigArg", "(", "ArgConfig", ")", ":", "_type", "=", "ArgConfig", ".", "get", "(", "'type'", ")", "if", "_type", ":", "if", "hasattr", "(", "_type", ",", "'__ec_config__'", ")", ":", "# pass the ArgConfig to the CustomType:", "_type", ".", "__ec_con...
r"""Reconfigures an argument based on its configuration.
[ "r", "Reconfigures", "an", "argument", "based", "on", "its", "configuration", "." ]
train
https://github.com/Laufire/ec/blob/63e84a1daef9234487d7de538e5da233a7d13071/ec/modules/helpers.py#L164-L179
Laufire/ec
ec/modules/helpers.py
gatherInput
def gatherInput(**Config):
    r"""Helps to interactively get user input.

    Prompts with the label derived from Config; on EOF or empty input falls
    back to Config['default'] when one is given. When a 'type' callable is
    configured it is applied to the raw string; conversion failures are
    reported and the prompt repeats.
    """
    _type = Config.get('type')

    while True:
        try:
            got = raw_input('%s: ' % getLabel(Config))
        except EOFError:
            got = None

        if not got and 'default' in Config:
            return Config['default']

        try:
            return _type(got) if _type else got
        except (ValueError, TypeError) as e:
            # BUG FIX: the original had a separate `except TypeError:` handler
            # that referenced an unbound name 'e', raising NameError instead
            # of reporting the conversion error. Handle both types together.
            err(str(e) or '<invalid value>')
python
def gatherInput(**Config): r"""Helps to interactively get user input. """ _type = Config.get('type') while True: try: got = raw_input('%s: ' % getLabel(Config)) except EOFError: got = None if not got and 'default' in Config: return Config['default'] try: return _type(got) if _type else got except ValueError as e: err(str(e) or '<invalid value>') except TypeError: err(str(e) or '<invalid value>')
[ "def", "gatherInput", "(", "*", "*", "Config", ")", ":", "_type", "=", "Config", ".", "get", "(", "'type'", ")", "while", "True", ":", "try", ":", "got", "=", "raw_input", "(", "'%s: '", "%", "getLabel", "(", "Config", ")", ")", "except", "EOFError",...
r"""Helps to interactively get user input.
[ "r", "Helps", "to", "interactively", "get", "user", "input", "." ]
train
https://github.com/Laufire/ec/blob/63e84a1daef9234487d7de538e5da233a7d13071/ec/modules/helpers.py#L197-L219
Laufire/ec
ec/modules/helpers.py
getTaskHelp
def getTaskHelp(_Task):
    r"""Gets help on the given task member.

    Lists the task's name and description (when configured), followed by a
    section describing its arguments.
    """
    lines = []

    for key in ['name', 'desc']:
        value = _Task.Config.get(key)
        if value is not None:
            lines.append('%s: %s' % (key, value))

    Args = _Task.Args
    if Args:
        lines.append('\nArgs:')
        for argName, Arg in Args.items():
            lines.append(' %s: %s' % (argName, Arg.get('desc', Arg['type_str'])))
        lines.append('')

    return '\n'.join(lines).rstrip()
python
def getTaskHelp(_Task): r"""Gets help on the given task member. """ Ret = [] for k in ['name', 'desc']: v = _Task.Config.get(k) if v is not None: Ret.append('%s: %s' % (k, v)) Args = _Task.Args if Args: Ret.append('\nArgs:') for argName, Arg in Args.items(): Ret.append(' %s: %s' % (argName, Arg.get('desc', Arg['type_str']))) Ret.append('') return '\n'.join(Ret).rstrip()
[ "def", "getTaskHelp", "(", "_Task", ")", ":", "Ret", "=", "[", "]", "for", "k", "in", "[", "'name'", ",", "'desc'", "]", ":", "v", "=", "_Task", ".", "Config", ".", "get", "(", "k", ")", "if", "v", "is", "not", "None", ":", "Ret", ".", "appen...
r"""Gets help on the given task member.
[ "r", "Gets", "help", "on", "the", "given", "task", "member", "." ]
train
https://github.com/Laufire/ec/blob/63e84a1daef9234487d7de538e5da233a7d13071/ec/modules/helpers.py#L221-L242
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/venv.py
restart_in_venv
def restart_in_venv(venv, base, site_packages, args):
    """
    Restart this script using the interpreter in the given virtual environment

    Resolves `venv` against `base` (expanding '~'), creating the environment
    with virtualenv when it does not exist, then re-executes runner.py under
    that interpreter and exits with the child's return code.
    """
    if base and not os.path.isabs(venv) and not venv.startswith('~'):
        base = os.path.expanduser(base)
        # ensure we have an abs basepath at this point:
        # a relative one makes no sense (or does it?)
        if os.path.isabs(base):
            venv = os.path.join(base, venv)

    if venv.startswith('~'):
        venv = os.path.expanduser(venv)

    if not os.path.exists(venv):
        try:
            import virtualenv
        except ImportError:
            print('The virtual environment does not exist: %s' % venv)
            print('and virtualenv is not installed, so a new environment cannot be created')
            sys.exit(3)
        print('Creating new virtualenv environment in %s' % venv)
        virtualenv.logger = logger
        logger.indent += 2
        virtualenv.create_environment(venv, site_packages=site_packages)
    if sys.platform == 'win32':
        python = os.path.join(venv, 'Scripts', 'python.exe')
        # check for bin directory which is used in buildouts
        if not os.path.exists(python):
            python = os.path.join(venv, 'bin', 'python.exe')
    else:
        python = os.path.join(venv, 'bin', 'python')
    if not os.path.exists(python):
        python = venv
    if not os.path.exists(python):
        raise BadCommand('Cannot find virtual environment interpreter at %s' % python)
    base = os.path.dirname(os.path.dirname(python))
    # FIX: the original bound the name 'file', shadowing the builtin.
    script_path = os.path.join(os.path.dirname(__file__), 'runner.py')
    if script_path.endswith('.pyc'):
        # run the .py source, not the compiled bytecode
        script_path = script_path[:-1]
    proc = subprocess.Popen(
        [python, script_path] + args + [base, '___VENV_RESTART___'])
    proc.wait()
    sys.exit(proc.returncode)
python
def restart_in_venv(venv, base, site_packages, args): """ Restart this script using the interpreter in the given virtual environment """ if base and not os.path.isabs(venv) and not venv.startswith('~'): base = os.path.expanduser(base) # ensure we have an abs basepath at this point: # a relative one makes no sense (or does it?) if os.path.isabs(base): venv = os.path.join(base, venv) if venv.startswith('~'): venv = os.path.expanduser(venv) if not os.path.exists(venv): try: import virtualenv except ImportError: print('The virtual environment does not exist: %s' % venv) print('and virtualenv is not installed, so a new environment cannot be created') sys.exit(3) print('Creating new virtualenv environment in %s' % venv) virtualenv.logger = logger logger.indent += 2 virtualenv.create_environment(venv, site_packages=site_packages) if sys.platform == 'win32': python = os.path.join(venv, 'Scripts', 'python.exe') # check for bin directory which is used in buildouts if not os.path.exists(python): python = os.path.join(venv, 'bin', 'python.exe') else: python = os.path.join(venv, 'bin', 'python') if not os.path.exists(python): python = venv if not os.path.exists(python): raise BadCommand('Cannot find virtual environment interpreter at %s' % python) base = os.path.dirname(os.path.dirname(python)) file = os.path.join(os.path.dirname(__file__), 'runner.py') if file.endswith('.pyc'): file = file[:-1] proc = subprocess.Popen( [python, file] + args + [base, '___VENV_RESTART___']) proc.wait() sys.exit(proc.returncode)
[ "def", "restart_in_venv", "(", "venv", ",", "base", ",", "site_packages", ",", "args", ")", ":", "if", "base", "and", "not", "os", ".", "path", ".", "isabs", "(", "venv", ")", "and", "not", "venv", ".", "startswith", "(", "'~'", ")", ":", "base", "...
Restart this script using the interpreter in the given virtual environment
[ "Restart", "this", "script", "using", "the", "interpreter", "in", "the", "given", "virtual", "environment" ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/venv.py#L10-L53
duniter/duniter-python-api
duniterpy/api/bma/tx.py
history
async def history(client: Client, pubkey: str) -> dict:
    """
    Get transactions history of public key

    :param client: Client to connect to the api
    :param pubkey: Public key
    :return:
    """
    path = MODULE + '/history/%s' % pubkey
    return await client.get(path, schema=HISTORY_SCHEMA)
python
async def history(client: Client, pubkey: str) -> dict: """ Get transactions history of public key :param client: Client to connect to the api :param pubkey: Public key :return: """ return await client.get(MODULE + '/history/%s' % pubkey, schema=HISTORY_SCHEMA)
[ "async", "def", "history", "(", "client", ":", "Client", ",", "pubkey", ":", "str", ")", "->", "dict", ":", "return", "await", "client", ".", "get", "(", "MODULE", "+", "'/history/%s'", "%", "pubkey", ",", "schema", "=", "HISTORY_SCHEMA", ")" ]
Get transactions history of public key :param client: Client to connect to the api :param pubkey: Public key :return:
[ "Get", "transactions", "history", "of", "public", "key" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/bma/tx.py#L206-L214
duniter/duniter-python-api
duniterpy/api/bma/tx.py
process
async def process(client: Client, transaction_signed_raw: str) -> ClientResponse:
    """
    POST a transaction raw document

    :param client: Client to connect to the api
    :param transaction_signed_raw: Transaction signed raw document
    :return:
    """
    payload = {'transaction': transaction_signed_raw}
    return await client.post(MODULE + '/process', payload, rtype=RESPONSE_AIOHTTP)
python
async def process(client: Client, transaction_signed_raw: str) -> ClientResponse: """ POST a transaction raw document :param client: Client to connect to the api :param transaction_signed_raw: Transaction signed raw document :return: """ return await client.post(MODULE + '/process', {'transaction': transaction_signed_raw}, rtype=RESPONSE_AIOHTTP)
[ "async", "def", "process", "(", "client", ":", "Client", ",", "transaction_signed_raw", ":", "str", ")", "->", "ClientResponse", ":", "return", "await", "client", ".", "post", "(", "MODULE", "+", "'/process'", ",", "{", "'transaction'", ":", "transaction_signe...
POST a transaction raw document :param client: Client to connect to the api :param transaction_signed_raw: Transaction signed raw document :return:
[ "POST", "a", "transaction", "raw", "document" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/bma/tx.py#L217-L225
duniter/duniter-python-api
duniterpy/api/bma/tx.py
sources
async def sources(client: Client, pubkey: str) -> dict:
    """
    GET transaction sources

    :param client: Client to connect to the api
    :param pubkey: Public key
    :return:
    """
    path = MODULE + '/sources/%s' % pubkey
    return await client.get(path, schema=SOURCES_SCHEMA)
python
async def sources(client: Client, pubkey: str) -> dict: """ GET transaction sources :param client: Client to connect to the api :param pubkey: Public key :return: """ return await client.get(MODULE + '/sources/%s' % pubkey, schema=SOURCES_SCHEMA)
[ "async", "def", "sources", "(", "client", ":", "Client", ",", "pubkey", ":", "str", ")", "->", "dict", ":", "return", "await", "client", ".", "get", "(", "MODULE", "+", "'/sources/%s'", "%", "pubkey", ",", "schema", "=", "SOURCES_SCHEMA", ")" ]
GET transaction sources :param client: Client to connect to the api :param pubkey: Public key :return:
[ "GET", "transaction", "sources" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/bma/tx.py#L228-L236
duniter/duniter-python-api
duniterpy/api/bma/tx.py
blocks
async def blocks(client: Client, pubkey: str, start: int, end: int) -> dict:
    """
    GET public key transactions history between start and end block number

    :param client: Client to connect to the api
    :param pubkey: Public key
    :param start: Start from block number
    :param end: End to block number
    :return:
    """
    path = MODULE + '/history/%s/blocks/%s/%s' % (pubkey, start, end)
    return await client.get(path, schema=HISTORY_SCHEMA)
python
async def blocks(client: Client, pubkey: str, start: int, end: int) -> dict: """ GET public key transactions history between start and end block number :param client: Client to connect to the api :param pubkey: Public key :param start: Start from block number :param end: End to block number :return: """ return await client.get(MODULE + '/history/%s/blocks/%s/%s' % (pubkey, start, end), schema=HISTORY_SCHEMA)
[ "async", "def", "blocks", "(", "client", ":", "Client", ",", "pubkey", ":", "str", ",", "start", ":", "int", ",", "end", ":", "int", ")", "->", "dict", ":", "return", "await", "client", ".", "get", "(", "MODULE", "+", "'/history/%s/blocks/%s/%s'", "%",...
GET public key transactions history between start and end block number :param client: Client to connect to the api :param pubkey: Public key :param start: Start from block number :param end: End to block number :return:
[ "GET", "public", "key", "transactions", "history", "between", "start", "and", "end", "block", "number" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/bma/tx.py#L250-L260
onyxfish/clan
clan/report.py
ReportCommand.add_argparser
def add_argparser(self, root, parents):
    """
    Add arguments for this command.
    """
    parser = root.add_parser('report', parents=parents)
    parser.set_defaults(func=self)

    # Plain string options added before --ndays, in display order.
    for flag, help_text in [
        ('auth', 'Path to the authorized credentials file (analytics.dat).'),
        ('title', 'User-friendly title for your report.'),
        ('property-id', 'Google Analytics ID of the property to query.'),
        ('start-date', 'Start date for the query in YYYY-MM-DD format.'),
        ('end-date', 'End date for the query in YYYY-MM-DD format. Supersedes --ndays.'),
    ]:
        parser.add_argument('--' + flag, dest=flag, action='store', help=help_text)

    # The one integer option.
    parser.add_argument(
        '--ndays',
        dest='ndays',
        action='store',
        type=int,
        help='The number of days from the start-date to query. Requires start-date. Superseded by end-date.'
    )

    # Remaining string options, after --ndays.
    for flag, help_text in [
        ('domain', 'Restrict results to only urls with this domain.'),
        ('prefix', 'Restrict results to only urls with this prefix.'),
    ]:
        parser.add_argument('--' + flag, dest=flag, action='store', help=help_text)

    parser.add_argument(
        'input_path',
        action='store',
        help='Path to either a YAML configuration file or pre-reported JSON data.'
    )

    parser.add_argument(
        'output_path',
        action='store',
        help='Path to output either an HTML report or a JSON data file.'
    )

    return parser
python
def add_argparser(self, root, parents): """ Add arguments for this command. """ parser = root.add_parser('report', parents=parents) parser.set_defaults(func=self) parser.add_argument( '--auth', dest='auth', action='store', help='Path to the authorized credentials file (analytics.dat).' ) parser.add_argument( '--title', dest='title', action='store', help='User-friendly title for your report.' ) parser.add_argument( '--property-id', dest='property-id', action='store', help='Google Analytics ID of the property to query.' ) parser.add_argument( '--start-date', dest='start-date', action='store', help='Start date for the query in YYYY-MM-DD format.' ) parser.add_argument( '--end-date', dest='end-date', action='store', help='End date for the query in YYYY-MM-DD format. Supersedes --ndays.' ) parser.add_argument( '--ndays', dest='ndays', action='store', type=int, help='The number of days from the start-date to query. Requires start-date. Superseded by end-date.' ) parser.add_argument( '--domain', dest='domain', action='store', help='Restrict results to only urls with this domain.' ) parser.add_argument( '--prefix', dest='prefix', action='store', help='Restrict results to only urls with this prefix.' ) parser.add_argument( 'input_path', action='store', help='Path to either a YAML configuration file or pre-reported JSON data.' ) parser.add_argument( 'output_path', action='store', help='Path to output either an HTML report or a JSON data file.' ) return parser
[ "def", "add_argparser", "(", "self", ",", "root", ",", "parents", ")", ":", "parser", "=", "root", ".", "add_parser", "(", "'report'", ",", "parents", "=", "parents", ")", "parser", ".", "set_defaults", "(", "func", "=", "self", ")", "parser", ".", "ad...
Add arguments for this command.
[ "Add", "arguments", "for", "this", "command", "." ]
train
https://github.com/onyxfish/clan/blob/415ddd027ea81013f2d62d75aec6da70703df49c/clan/report.py#L73-L140
onyxfish/clan
clan/report.py
ReportCommand._ndays
def _ndays(self, start_date, ndays): """ Compute an end date given a start date and a number of days. """ if not getattr(self.args, 'start-date') and not self.config.get('start-date', None): raise Exception('start-date must be provided when ndays is used.') d = date(*map(int, start_date.split('-'))) d += timedelta(days=ndays) return d.strftime('%Y-%m-%d')
python
def _ndays(self, start_date, ndays): """ Compute an end date given a start date and a number of days. """ if not getattr(self.args, 'start-date') and not self.config.get('start-date', None): raise Exception('start-date must be provided when ndays is used.') d = date(*map(int, start_date.split('-'))) d += timedelta(days=ndays) return d.strftime('%Y-%m-%d')
[ "def", "_ndays", "(", "self", ",", "start_date", ",", "ndays", ")", ":", "if", "not", "getattr", "(", "self", ".", "args", ",", "'start-date'", ")", "and", "not", "self", ".", "config", ".", "get", "(", "'start-date'", ",", "None", ")", ":", "raise",...
Compute an end date given a start date and a number of days.
[ "Compute", "an", "end", "date", "given", "a", "start", "date", "and", "a", "number", "of", "days", "." ]
train
https://github.com/onyxfish/clan/blob/415ddd027ea81013f2d62d75aec6da70703df49c/clan/report.py#L142-L152
onyxfish/clan
clan/report.py
ReportCommand.query
def query(self, start_date=None, end_date=None, ndays=None, metrics=None, dimensions=None, filters=None, segment=None, sort=None, start_index=1, max_results=10):
    """
    Execute a query.

    Dates, domain and prefix fall back from explicit arguments, to CLI
    arguments, to the config file. Returns the decoded API response.
    """
    # FIX: the original used mutable default arguments ([]). None sentinels
    # are equivalent here ([] and None are both falsy downstream) and avoid
    # the shared-mutable-default pitfall.
    metrics = metrics or []
    dimensions = dimensions or []
    sort = sort or []

    # FIX: dropped the original's no-op `start_date = start_date` /
    # `end_date = end_date` branches.
    if not start_date:
        start_date = (
            getattr(self.args, 'start-date')
            or self.config.get('start-date', None)
            or '2005-01-01'
        )

    if not end_date:
        if getattr(self.args, 'end-date'):
            end_date = getattr(self.args, 'end-date')
        elif self.config.get('end-date', None):
            end_date = self.config['end-date']
        elif ndays:
            end_date = self._ndays(start_date, ndays)
        elif self.args.ndays:
            end_date = self._ndays(start_date, self.args.ndays)
        elif self.config.get('ndays', None):
            end_date = self._ndays(start_date, self.config['ndays'])
        else:
            end_date = 'today'

    domain = self.args.domain or self.config.get('domain', None)

    if domain:
        domain_filter = 'ga:hostname==%s' % domain
        filters = '%s;%s' % (domain_filter, filters) if filters else domain_filter

    prefix = self.args.prefix or self.config.get('prefix', None)

    if prefix:
        prefix_filter = 'ga:pagePath=~^%s' % prefix
        filters = '%s;%s' % (prefix_filter, filters) if filters else prefix_filter

    return self.service.data().ga().get(
        ids='ga:' + self.config['property-id'],
        start_date=start_date,
        end_date=end_date,
        metrics=','.join(metrics) or None,
        dimensions=','.join(dimensions) or None,
        filters=filters,
        segment=segment,
        sort=','.join(sort) or None,
        start_index=str(start_index),
        max_results=str(max_results)
    ).execute()
python
def query(self, start_date=None, end_date=None, ndays=None, metrics=[], dimensions=[], filters=None, segment=None, sort=[], start_index=1, max_results=10): """ Execute a query. """ if start_date: start_date = start_date elif getattr(self.args, 'start-date'): start_date = getattr(self.args, 'start-date') elif self.config.get('start-date', None): start_date = self.config['start-date'] else: start_date = '2005-01-01' if end_date: end_date = end_date elif getattr(self.args, 'end-date'): end_date = getattr(self.args, 'end-date') elif self.config.get('end-date', None): end_date = self.config['end-date'] elif ndays: end_date = self._ndays(start_date, ndays) elif self.args.ndays: end_date = self._ndays(start_date, self.args.ndays) elif self.config.get('ndays', None): end_date = self._ndays(start_date, self.config['ndays']) else: end_date = 'today' if self.args.domain: domain = self.args.domain elif self.config.get('domain', None): domain = self.config['domain'] else: domain = None if domain: domain_filter = 'ga:hostname==%s' % domain if filters: filters = '%s;%s' % (domain_filter, filters) else: filters = domain_filter if self.args.prefix: prefix = self.args.prefix elif self.config.get('prefix', None): prefix = self.config['prefix'] else: prefix = None if prefix: prefix_filter = 'ga:pagePath=~^%s' % prefix if filters: filters = '%s;%s' % (prefix_filter, filters) else: filters = prefix_filter return self.service.data().ga().get( ids='ga:' + self.config['property-id'], start_date=start_date, end_date=end_date, metrics=','.join(metrics) or None, dimensions=','.join(dimensions) or None, filters=filters, segment=segment, sort=','.join(sort) or None, start_index=str(start_index), max_results=str(max_results) ).execute()
[ "def", "query", "(", "self", ",", "start_date", "=", "None", ",", "end_date", "=", "None", ",", "ndays", "=", "None", ",", "metrics", "=", "[", "]", ",", "dimensions", "=", "[", "]", ",", "filters", "=", "None", ",", "segment", "=", "None", ",", ...
Execute a query.
[ "Execute", "a", "query", "." ]
train
https://github.com/onyxfish/clan/blob/415ddd027ea81013f2d62d75aec6da70703df49c/clan/report.py#L154-L223
onyxfish/clan
clan/report.py
ReportCommand.report
def report(self): """ Query analytics and stash data in a format suitable for serializing. """ output = OrderedDict() for arg in GLOBAL_ARGUMENTS: output[arg] = getattr(self.args, arg) or self.config.get(arg, None) output['title'] = getattr(self.args, 'title') or self.config.get('title', 'Untitled Report') output['run_date'] = datetime.now().strftime('%Y-%m-%d') output['queries'] = [] for analytic in self.config.get('queries', []): print 'Querying "%s"' % analytic['name'] results = self.query( metrics=analytic['metrics'], dimensions=analytic.get('dimensions', []), filters=analytic.get('filter', None), segment=analytic.get('segment', None), sort=analytic.get('sort', []), start_index=analytic.get('start-index', 1), max_results=analytic.get('max-results', 10) ) dimensions_len = len(analytic.get('dimensions', [])) data = OrderedDict([ ('config', analytic), ('sampled', results.get('containsSampledData', False)), ('sampleSize', int(results.get('sampleSize', 0))), ('sampleSpace', int(results.get('sampleSpace', 0))), ('data_types', OrderedDict()), ('data', OrderedDict()) ]) for column in results['columnHeaders'][dimensions_len:]: data['data_types'][column['name']] = column['dataType'] def cast_data_type(d, dt): if dt == 'INTEGER': return int(d) elif data_type in ['TIME', 'FLOAT', 'CURRENCY', 'PERCENT']: return float(d) else: raise Exception('Unknown metric data type: %s' % data_type) for i, metric in enumerate(analytic['metrics']): data['data'][metric] = OrderedDict() data_type = data['data_types'][metric] if dimensions_len: for row in results.get('rows', []): column = i + dimensions_len label = ','.join(row[:dimensions_len]) value = cast_data_type(row[column], data_type) data['data'][metric][label] = value data['data'][metric]['total'] = cast_data_type(results['totalsForAllResults'][metric], data_type) # Prevent rate-limiting sleep(1) output['queries'].append(data) return output
python
def report(self): """ Query analytics and stash data in a format suitable for serializing. """ output = OrderedDict() for arg in GLOBAL_ARGUMENTS: output[arg] = getattr(self.args, arg) or self.config.get(arg, None) output['title'] = getattr(self.args, 'title') or self.config.get('title', 'Untitled Report') output['run_date'] = datetime.now().strftime('%Y-%m-%d') output['queries'] = [] for analytic in self.config.get('queries', []): print 'Querying "%s"' % analytic['name'] results = self.query( metrics=analytic['metrics'], dimensions=analytic.get('dimensions', []), filters=analytic.get('filter', None), segment=analytic.get('segment', None), sort=analytic.get('sort', []), start_index=analytic.get('start-index', 1), max_results=analytic.get('max-results', 10) ) dimensions_len = len(analytic.get('dimensions', [])) data = OrderedDict([ ('config', analytic), ('sampled', results.get('containsSampledData', False)), ('sampleSize', int(results.get('sampleSize', 0))), ('sampleSpace', int(results.get('sampleSpace', 0))), ('data_types', OrderedDict()), ('data', OrderedDict()) ]) for column in results['columnHeaders'][dimensions_len:]: data['data_types'][column['name']] = column['dataType'] def cast_data_type(d, dt): if dt == 'INTEGER': return int(d) elif data_type in ['TIME', 'FLOAT', 'CURRENCY', 'PERCENT']: return float(d) else: raise Exception('Unknown metric data type: %s' % data_type) for i, metric in enumerate(analytic['metrics']): data['data'][metric] = OrderedDict() data_type = data['data_types'][metric] if dimensions_len: for row in results.get('rows', []): column = i + dimensions_len label = ','.join(row[:dimensions_len]) value = cast_data_type(row[column], data_type) data['data'][metric][label] = value data['data'][metric]['total'] = cast_data_type(results['totalsForAllResults'][metric], data_type) # Prevent rate-limiting sleep(1) output['queries'].append(data) return output
[ "def", "report", "(", "self", ")", ":", "output", "=", "OrderedDict", "(", ")", "for", "arg", "in", "GLOBAL_ARGUMENTS", ":", "output", "[", "arg", "]", "=", "getattr", "(", "self", ".", "args", ",", "arg", ")", "or", "self", ".", "config", ".", "ge...
Query analytics and stash data in a format suitable for serializing.
[ "Query", "analytics", "and", "stash", "data", "in", "a", "format", "suitable", "for", "serializing", "." ]
train
https://github.com/onyxfish/clan/blob/415ddd027ea81013f2d62d75aec6da70703df49c/clan/report.py#L225-L292
onyxfish/clan
clan/report.py
ReportCommand.html
def html(self, report, f):
    """
    Write report data to an HTML file.
    """
    env = Environment(loader=PackageLoader('clan', 'templates'))
    template = env.get_template('report.html')

    rendered = template.render(
        report=report,
        GLOBAL_ARGUMENTS=GLOBAL_ARGUMENTS,
        field_definitions=self.field_definitions,
        format_comma=format_comma,
        format_duration=format_duration,
        format_percent=format_percent,
    )

    f.write(rendered.encode('utf-8'))
python
def html(self, report, f): """ Write report data to an HTML file. """ env = Environment(loader=PackageLoader('clan', 'templates')) template = env.get_template('report.html') context = { 'report': report, 'GLOBAL_ARGUMENTS': GLOBAL_ARGUMENTS, 'field_definitions': self.field_definitions, 'format_comma': format_comma, 'format_duration': format_duration, 'format_percent': format_percent } f.write(template.render(**context).encode('utf-8'))
[ "def", "html", "(", "self", ",", "report", ",", "f", ")", ":", "env", "=", "Environment", "(", "loader", "=", "PackageLoader", "(", "'clan'", ",", "'templates'", ")", ")", "template", "=", "env", ".", "get_template", "(", "'report.html'", ")", "context",...
Write report data to an HTML file.
[ "Write", "report", "data", "to", "an", "HTML", "file", "." ]
train
https://github.com/onyxfish/clan/blob/415ddd027ea81013f2d62d75aec6da70703df49c/clan/report.py#L294-L311
etcher-be/epab
epab/utils/_ensure_exe.py
ensure_exe
def ensure_exe(exe_name: str, *paths: str):  # pragma: no cover
    """
    Makes sure that an executable can be found on the system path.

    Will exit the program if the executable cannot be found.

    Args:
        exe_name: name of the executable
        paths: optional path(s) to be searched; if not specified, search the whole system
    """
    found = elib_run.find_executable(exe_name, *paths)
    if found:
        return

    LOGGER.error('could not find "%s.exe" on this system', exe_name)
    sys.exit(-1)
python
def ensure_exe(exe_name: str, *paths: str): # pragma: no cover """ Makes sure that an executable can be found on the system path. Will exit the program if the executable cannot be found Args: exe_name: name of the executable paths: optional path(s) to be searched; if not specified, search the whole system """ if not elib_run.find_executable(exe_name, *paths): LOGGER.error('could not find "%s.exe" on this system', exe_name) sys.exit(-1)
[ "def", "ensure_exe", "(", "exe_name", ":", "str", ",", "*", "paths", ":", "str", ")", ":", "# pragma: no cover", "if", "not", "elib_run", ".", "find_executable", "(", "exe_name", ",", "*", "paths", ")", ":", "LOGGER", ".", "error", "(", "'could not find \"...
Makes sure that an executable can be found on the system path. Will exit the program if the executable cannot be found Args: exe_name: name of the executable paths: optional path(s) to be searched; if not specified, search the whole system
[ "Makes", "sure", "that", "an", "executable", "can", "be", "found", "on", "the", "system", "path", ".", "Will", "exit", "the", "program", "if", "the", "executable", "cannot", "be", "found" ]
train
https://github.com/etcher-be/epab/blob/024cde74d058281aa66e6e4b7b71dccbe803b1c1/epab/utils/_ensure_exe.py#L14-L26
vadimk2016/v-vk-api
v_vk_api/session.py
APISession.handle_captcha
def handle_captcha(self, query_params: dict, html: str, login_data: dict) -> requests.Response:
    """
    Handling CAPTCHA request
    """
    check_url = get_base_url(html)
    sid = query_params['sid']
    captcha_url = '{}?s={}&sid={}'.format(self.CAPTCHA_URI, query_params['s'], sid)

    login_data['captcha_sid'] = sid
    login_data['captcha_key'] = input(self.CAPTCHA_INPUT_PROMPT.format(captcha_url))

    return self.post(check_url, login_data)
python
def handle_captcha(self, query_params: dict, html: str, login_data: dict) -> requests.Response: """ Handling CAPTCHA request """ check_url = get_base_url(html) captcha_url = '{}?s={}&sid={}'.format(self.CAPTCHA_URI, query_params['s'], query_params['sid']) login_data['captcha_sid'] = query_params['sid'] login_data['captcha_key'] = input(self.CAPTCHA_INPUT_PROMPT .format(captcha_url)) return self.post(check_url, login_data)
[ "def", "handle_captcha", "(", "self", ",", "query_params", ":", "dict", ",", "html", ":", "str", ",", "login_data", ":", "dict", ")", "->", "requests", ".", "Response", ":", "check_url", "=", "get_base_url", "(", "html", ")", "captcha_url", "=", "'{}?s={}&...
Handling CAPTCHA request
[ "Handling", "CAPTCHA", "request" ]
train
https://github.com/vadimk2016/v-vk-api/blob/ef5656e09944b5319a1f573cfb7b022f3d31c0cf/v_vk_api/session.py#L38-L51
vadimk2016/v-vk-api
v_vk_api/session.py
APISession.handle_two_factor_check
def handle_two_factor_check(self, html: str) -> requests.Response: """ Handling two factor authorization request """ action_url = get_base_url(html) code = input(self.TWO_FACTOR_PROMPT).strip() data = {'code': code, '_ajax': '1', 'remember': '1'} post_url = '/'.join((self.LOGIN_URL, action_url)) return self.post(post_url, data)
python
def handle_two_factor_check(self, html: str) -> requests.Response: """ Handling two factor authorization request """ action_url = get_base_url(html) code = input(self.TWO_FACTOR_PROMPT).strip() data = {'code': code, '_ajax': '1', 'remember': '1'} post_url = '/'.join((self.LOGIN_URL, action_url)) return self.post(post_url, data)
[ "def", "handle_two_factor_check", "(", "self", ",", "html", ":", "str", ")", "->", "requests", ".", "Response", ":", "action_url", "=", "get_base_url", "(", "html", ")", "code", "=", "input", "(", "self", ".", "TWO_FACTOR_PROMPT", ")", ".", "strip", "(", ...
Handling two factor authorization request
[ "Handling", "two", "factor", "authorization", "request" ]
train
https://github.com/vadimk2016/v-vk-api/blob/ef5656e09944b5319a1f573cfb7b022f3d31c0cf/v_vk_api/session.py#L53-L61
vadimk2016/v-vk-api
v_vk_api/session.py
APISession.handle_phone_number_check
def handle_phone_number_check(self, html: str) -> requests.Response: """ Handling phone number request """ action_url = get_base_url(html) phone_number = input(self.PHONE_PROMPT) url_params = get_url_params(action_url) data = {'code': phone_number, 'act': 'security_check', 'hash': url_params['hash']} post_url = '/'.join((self.LOGIN_URL, action_url)) return self.post(post_url, data)
python
def handle_phone_number_check(self, html: str) -> requests.Response: """ Handling phone number request """ action_url = get_base_url(html) phone_number = input(self.PHONE_PROMPT) url_params = get_url_params(action_url) data = {'code': phone_number, 'act': 'security_check', 'hash': url_params['hash']} post_url = '/'.join((self.LOGIN_URL, action_url)) return self.post(post_url, data)
[ "def", "handle_phone_number_check", "(", "self", ",", "html", ":", "str", ")", "->", "requests", ".", "Response", ":", "action_url", "=", "get_base_url", "(", "html", ")", "phone_number", "=", "input", "(", "self", ".", "PHONE_PROMPT", ")", "url_params", "="...
Handling phone number request
[ "Handling", "phone", "number", "request" ]
train
https://github.com/vadimk2016/v-vk-api/blob/ef5656e09944b5319a1f573cfb7b022f3d31c0cf/v_vk_api/session.py#L63-L74
vadimk2016/v-vk-api
v_vk_api/session.py
APISession.check_for_additional_actions
def check_for_additional_actions(self, url_params: dict, html: str, login_data: dict) -> None: """ Checks the url for a request for additional actions, if so, calls the event handler """ action_response = '' if 'sid' in url_params: action_response = self.handle_captcha(url_params, html, login_data) elif 'authcheck' in url_params: action_response = self.handle_two_factor_check(html) elif 'security_check' in url_params: action_response = self.handle_phone_number_check(html) if action_response: check_page_for_warnings(action_response.text)
python
def check_for_additional_actions(self, url_params: dict, html: str, login_data: dict) -> None: """ Checks the url for a request for additional actions, if so, calls the event handler """ action_response = '' if 'sid' in url_params: action_response = self.handle_captcha(url_params, html, login_data) elif 'authcheck' in url_params: action_response = self.handle_two_factor_check(html) elif 'security_check' in url_params: action_response = self.handle_phone_number_check(html) if action_response: check_page_for_warnings(action_response.text)
[ "def", "check_for_additional_actions", "(", "self", ",", "url_params", ":", "dict", ",", "html", ":", "str", ",", "login_data", ":", "dict", ")", "->", "None", ":", "action_response", "=", "''", "if", "'sid'", "in", "url_params", ":", "action_response", "=",...
Checks the url for a request for additional actions, if so, calls the event handler
[ "Checks", "the", "url", "for", "a", "request", "for", "additional", "actions", "if", "so", "calls", "the", "event", "handler" ]
train
https://github.com/vadimk2016/v-vk-api/blob/ef5656e09944b5319a1f573cfb7b022f3d31c0cf/v_vk_api/session.py#L76-L91
vadimk2016/v-vk-api
v_vk_api/session.py
APISession.login
def login(self) -> bool: """ Authorizes a user and returns a bool value of the result """ response = self.get(self.LOGIN_URL) login_url = get_base_url(response.text) login_data = {'email': self._login, 'pass': self._password} login_response = self.post(login_url, login_data) url_params = get_url_params(login_response.url) self.check_for_additional_actions(url_params, login_response.text, login_data) if 'remixsid' in self.cookies or 'remixsid6' in self.cookies: return True
python
def login(self) -> bool: """ Authorizes a user and returns a bool value of the result """ response = self.get(self.LOGIN_URL) login_url = get_base_url(response.text) login_data = {'email': self._login, 'pass': self._password} login_response = self.post(login_url, login_data) url_params = get_url_params(login_response.url) self.check_for_additional_actions(url_params, login_response.text, login_data) if 'remixsid' in self.cookies or 'remixsid6' in self.cookies: return True
[ "def", "login", "(", "self", ")", "->", "bool", ":", "response", "=", "self", ".", "get", "(", "self", ".", "LOGIN_URL", ")", "login_url", "=", "get_base_url", "(", "response", ".", "text", ")", "login_data", "=", "{", "'email'", ":", "self", ".", "_...
Authorizes a user and returns a bool value of the result
[ "Authorizes", "a", "user", "and", "returns", "a", "bool", "value", "of", "the", "result" ]
train
https://github.com/vadimk2016/v-vk-api/blob/ef5656e09944b5319a1f573cfb7b022f3d31c0cf/v_vk_api/session.py#L93-L106
vadimk2016/v-vk-api
v_vk_api/session.py
APISession.auth_oauth2
def auth_oauth2(self) -> dict: """ Authorizes a user by OAuth2 to get access token """ oauth_data = { 'client_id': self._app_id, 'display': 'mobile', 'response_type': 'token', 'scope': '+66560', 'v': self.API_VERSION } response = self.post(self.OAUTH_URL, oauth_data) url_params = get_url_params(response.url, fragment=True) if 'access_token' in url_params: return url_params action_url = get_base_url(response.text) if action_url: response = self.get(action_url) return get_url_params(response.url) response_json = response.json() if 'error' in response_json['error']: exception_msg = '{}: {}'.format(response_json['error'], response_json['error_description']) raise VVKAuthException(exception_msg)
python
def auth_oauth2(self) -> dict: """ Authorizes a user by OAuth2 to get access token """ oauth_data = { 'client_id': self._app_id, 'display': 'mobile', 'response_type': 'token', 'scope': '+66560', 'v': self.API_VERSION } response = self.post(self.OAUTH_URL, oauth_data) url_params = get_url_params(response.url, fragment=True) if 'access_token' in url_params: return url_params action_url = get_base_url(response.text) if action_url: response = self.get(action_url) return get_url_params(response.url) response_json = response.json() if 'error' in response_json['error']: exception_msg = '{}: {}'.format(response_json['error'], response_json['error_description']) raise VVKAuthException(exception_msg)
[ "def", "auth_oauth2", "(", "self", ")", "->", "dict", ":", "oauth_data", "=", "{", "'client_id'", ":", "self", ".", "_app_id", ",", "'display'", ":", "'mobile'", ",", "'response_type'", ":", "'token'", ",", "'scope'", ":", "'+66560'", ",", "'v'", ":", "s...
Authorizes a user by OAuth2 to get access token
[ "Authorizes", "a", "user", "by", "OAuth2", "to", "get", "access", "token" ]
train
https://github.com/vadimk2016/v-vk-api/blob/ef5656e09944b5319a1f573cfb7b022f3d31c0cf/v_vk_api/session.py#L108-L133
vadimk2016/v-vk-api
v_vk_api/session.py
APISession.get_access_token
def get_access_token(self) -> str: """ Returns the access token in case of successful authorization """ if self._service_token: return self._service_token if self._app_id and self._login and self._password: try: if self.login(): url_params = self.auth_oauth2() if 'access_token' in url_params: return url_params['access_token'] finally: self.close()
python
def get_access_token(self) -> str: """ Returns the access token in case of successful authorization """ if self._service_token: return self._service_token if self._app_id and self._login and self._password: try: if self.login(): url_params = self.auth_oauth2() if 'access_token' in url_params: return url_params['access_token'] finally: self.close()
[ "def", "get_access_token", "(", "self", ")", "->", "str", ":", "if", "self", ".", "_service_token", ":", "return", "self", ".", "_service_token", "if", "self", ".", "_app_id", "and", "self", ".", "_login", "and", "self", ".", "_password", ":", "try", ":"...
Returns the access token in case of successful authorization
[ "Returns", "the", "access", "token", "in", "case", "of", "successful", "authorization" ]
train
https://github.com/vadimk2016/v-vk-api/blob/ef5656e09944b5319a1f573cfb7b022f3d31c0cf/v_vk_api/session.py#L135-L148
vadimk2016/v-vk-api
v_vk_api/session.py
APISession.send_method_request
def send_method_request(self, method: str, method_params: dict) -> dict: """ Sends user-defined method and method params """ url = '/'.join((self.METHOD_URL, method)) method_params['v'] = self.API_VERSION if self._access_token: method_params['access_token'] = self._access_token response = self.post(url, method_params, timeout=10) response.raise_for_status() return json.loads(response.text)
python
def send_method_request(self, method: str, method_params: dict) -> dict: """ Sends user-defined method and method params """ url = '/'.join((self.METHOD_URL, method)) method_params['v'] = self.API_VERSION if self._access_token: method_params['access_token'] = self._access_token response = self.post(url, method_params, timeout=10) response.raise_for_status() return json.loads(response.text)
[ "def", "send_method_request", "(", "self", ",", "method", ":", "str", ",", "method_params", ":", "dict", ")", "->", "dict", ":", "url", "=", "'/'", ".", "join", "(", "(", "self", ".", "METHOD_URL", ",", "method", ")", ")", "method_params", "[", "'v'", ...
Sends user-defined method and method params
[ "Sends", "user", "-", "defined", "method", "and", "method", "params" ]
train
https://github.com/vadimk2016/v-vk-api/blob/ef5656e09944b5319a1f573cfb7b022f3d31c0cf/v_vk_api/session.py#L150-L160
ekatek/char-classify
chClassifier/phrase_classifier.py
PhraseClassificationTrainer.train
def train(self, net_sizes, epochs, batchsize): """ Initialize the base trainer """ self.trainer = ClassificationTrainer(self.data, self.targets, net_sizes) self.trainer.learn(epochs, batchsize) return self.trainer.evaluate(batchsize)
python
def train(self, net_sizes, epochs, batchsize): """ Initialize the base trainer """ self.trainer = ClassificationTrainer(self.data, self.targets, net_sizes) self.trainer.learn(epochs, batchsize) return self.trainer.evaluate(batchsize)
[ "def", "train", "(", "self", ",", "net_sizes", ",", "epochs", ",", "batchsize", ")", ":", "self", ".", "trainer", "=", "ClassificationTrainer", "(", "self", ".", "data", ",", "self", ".", "targets", ",", "net_sizes", ")", "self", ".", "trainer", ".", "...
Initialize the base trainer
[ "Initialize", "the", "base", "trainer" ]
train
https://github.com/ekatek/char-classify/blob/35fcfeac32d7e939e98efb528fab4ca2a45c20c5/chClassifier/phrase_classifier.py#L67-L71
ekatek/char-classify
chClassifier/phrase_classifier.py
PhraseClassificationTrainer.classify
def classify(self, phrase, cut_to_len=True): """ Classify a phrase based on the model. (See corresponding function in PhraseClassifier). Provided here mostly to help verify that a created model is worth saving. Technically, the results of the training should be enough for that, but it is good to be able to run it on concrete examples. """ if (len(phrase) > self.max_phrase_len): if not cut_to_len: raise Exception("Phrase too long.") phrase = phrase[0:self.max_phrase_len] if (self.trainer == None): raise Exception("Must train the classifier at least once before classifying") numbers = self.trainer.classify(stringToVector(phrase, self.vocab, self.max_vector_len)) return zip(self.targetTranslate, numbers)
python
def classify(self, phrase, cut_to_len=True): """ Classify a phrase based on the model. (See corresponding function in PhraseClassifier). Provided here mostly to help verify that a created model is worth saving. Technically, the results of the training should be enough for that, but it is good to be able to run it on concrete examples. """ if (len(phrase) > self.max_phrase_len): if not cut_to_len: raise Exception("Phrase too long.") phrase = phrase[0:self.max_phrase_len] if (self.trainer == None): raise Exception("Must train the classifier at least once before classifying") numbers = self.trainer.classify(stringToVector(phrase, self.vocab, self.max_vector_len)) return zip(self.targetTranslate, numbers)
[ "def", "classify", "(", "self", ",", "phrase", ",", "cut_to_len", "=", "True", ")", ":", "if", "(", "len", "(", "phrase", ")", ">", "self", ".", "max_phrase_len", ")", ":", "if", "not", "cut_to_len", ":", "raise", "Exception", "(", "\"Phrase too long.\""...
Classify a phrase based on the model. (See corresponding function in PhraseClassifier). Provided here mostly to help verify that a created model is worth saving. Technically, the results of the training should be enough for that, but it is good to be able to run it on concrete examples.
[ "Classify", "a", "phrase", "based", "on", "the", "model", ".", "(", "See", "corresponding", "function", "in", "PhraseClassifier", ")", ".", "Provided", "here", "mostly", "to", "help", "verify", "that", "a", "created", "model", "is", "worth", "saving", ".", ...
train
https://github.com/ekatek/char-classify/blob/35fcfeac32d7e939e98efb528fab4ca2a45c20c5/chClassifier/phrase_classifier.py#L73-L87
ekatek/char-classify
chClassifier/phrase_classifier.py
PhraseClassifier.classify
def classify(self, phrase, cut_to_len=True): """ Classify a phrase based on the loaded model. If cut_to_len is True, cut to desired length.""" if (len(phrase) > self.max_phrase_len): if not cut_to_len: raise Exception("Phrase too long.") phrase = phrase[0:self.max_phrase_len] numbers = self.classifier.classify(stringToVector(phrase, self.vocab, self.max_vector_len)) return zip(self.targets, numbers)
python
def classify(self, phrase, cut_to_len=True): """ Classify a phrase based on the loaded model. If cut_to_len is True, cut to desired length.""" if (len(phrase) > self.max_phrase_len): if not cut_to_len: raise Exception("Phrase too long.") phrase = phrase[0:self.max_phrase_len] numbers = self.classifier.classify(stringToVector(phrase, self.vocab, self.max_vector_len)) return zip(self.targets, numbers)
[ "def", "classify", "(", "self", ",", "phrase", ",", "cut_to_len", "=", "True", ")", ":", "if", "(", "len", "(", "phrase", ")", ">", "self", ".", "max_phrase_len", ")", ":", "if", "not", "cut_to_len", ":", "raise", "Exception", "(", "\"Phrase too long.\""...
Classify a phrase based on the loaded model. If cut_to_len is True, cut to desired length.
[ "Classify", "a", "phrase", "based", "on", "the", "loaded", "model", ".", "If", "cut_to_len", "is", "True", "cut", "to", "desired", "length", "." ]
train
https://github.com/ekatek/char-classify/blob/35fcfeac32d7e939e98efb528fab4ca2a45c20c5/chClassifier/phrase_classifier.py#L130-L139
oblalex/verboselib
verboselib/management/utils.py
find_command
def find_command(cmd, path=None, pathext=None): """ Taken `from Django http://bit.ly/1njB3Y9>`_. """ if path is None: path = os.environ.get('PATH', '').split(os.pathsep) if isinstance(path, string_types): path = [path] # check if there are path extensions for Windows executables if pathext is None: pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD') pathext = pathext.split(os.pathsep) # don't use extensions if the command ends with one of them for ext in pathext: if cmd.endswith(ext): pathext = [''] break # check if we find the command on PATH for p in path: f = os.path.join(p, cmd) if os.path.isfile(f): return f for ext in pathext: fext = f + ext if os.path.isfile(fext): return fext return None
python
def find_command(cmd, path=None, pathext=None): """ Taken `from Django http://bit.ly/1njB3Y9>`_. """ if path is None: path = os.environ.get('PATH', '').split(os.pathsep) if isinstance(path, string_types): path = [path] # check if there are path extensions for Windows executables if pathext is None: pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD') pathext = pathext.split(os.pathsep) # don't use extensions if the command ends with one of them for ext in pathext: if cmd.endswith(ext): pathext = [''] break # check if we find the command on PATH for p in path: f = os.path.join(p, cmd) if os.path.isfile(f): return f for ext in pathext: fext = f + ext if os.path.isfile(fext): return fext return None
[ "def", "find_command", "(", "cmd", ",", "path", "=", "None", ",", "pathext", "=", "None", ")", ":", "if", "path", "is", "None", ":", "path", "=", "os", ".", "environ", ".", "get", "(", "'PATH'", ",", "''", ")", ".", "split", "(", "os", ".", "pa...
Taken `from Django http://bit.ly/1njB3Y9>`_.
[ "Taken", "from", "Django", "http", ":", "//", "bit", ".", "ly", "/", "1njB3Y9", ">", "_", "." ]
train
https://github.com/oblalex/verboselib/blob/3c108bef060b091e1f7c08861ab07672c87ddcff/verboselib/management/utils.py#L10-L39
oblalex/verboselib
verboselib/management/utils.py
handle_extensions
def handle_extensions(extensions=None, ignored=None): """ Organizes multiple extensions that are separated with commas or passed by using --extension/-e multiple times. Note that the .py extension is ignored here because of the way non-*.py files are handled in ``extract`` messages (they are copied to file.ext.py files to trick xgettext to parse them as Python files). For example: running:: $ verboselib-manage extract -e js,txt -e xhtml -a would result in an extension list ``['.js', '.txt', '.xhtml']`` .. code-block:: python >>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py']) set(['.html', '.js']) >>> handle_extensions(['.html, txt,.tpl']) set(['.html', '.tpl', '.txt']) Taken `from Django <http://bit.ly/1r7Eokw>`_ and changed a bit. """ extensions = extensions or () ignored = ignored or ('py', ) ext_list = [] for ext in extensions: ext_list.extend(ext.replace(' ', '').split(',')) for i, ext in enumerate(ext_list): if not ext.startswith('.'): ext_list[i] = '.%s' % ext_list[i] return set([x for x in ext_list if x.strip('.') not in ignored])
python
def handle_extensions(extensions=None, ignored=None): """ Organizes multiple extensions that are separated with commas or passed by using --extension/-e multiple times. Note that the .py extension is ignored here because of the way non-*.py files are handled in ``extract`` messages (they are copied to file.ext.py files to trick xgettext to parse them as Python files). For example: running:: $ verboselib-manage extract -e js,txt -e xhtml -a would result in an extension list ``['.js', '.txt', '.xhtml']`` .. code-block:: python >>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py']) set(['.html', '.js']) >>> handle_extensions(['.html, txt,.tpl']) set(['.html', '.tpl', '.txt']) Taken `from Django <http://bit.ly/1r7Eokw>`_ and changed a bit. """ extensions = extensions or () ignored = ignored or ('py', ) ext_list = [] for ext in extensions: ext_list.extend(ext.replace(' ', '').split(',')) for i, ext in enumerate(ext_list): if not ext.startswith('.'): ext_list[i] = '.%s' % ext_list[i] return set([x for x in ext_list if x.strip('.') not in ignored])
[ "def", "handle_extensions", "(", "extensions", "=", "None", ",", "ignored", "=", "None", ")", ":", "extensions", "=", "extensions", "or", "(", ")", "ignored", "=", "ignored", "or", "(", "'py'", ",", ")", "ext_list", "=", "[", "]", "for", "ext", "in", ...
Organizes multiple extensions that are separated with commas or passed by using --extension/-e multiple times. Note that the .py extension is ignored here because of the way non-*.py files are handled in ``extract`` messages (they are copied to file.ext.py files to trick xgettext to parse them as Python files). For example: running:: $ verboselib-manage extract -e js,txt -e xhtml -a would result in an extension list ``['.js', '.txt', '.xhtml']`` .. code-block:: python >>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py']) set(['.html', '.js']) >>> handle_extensions(['.html, txt,.tpl']) set(['.html', '.tpl', '.txt']) Taken `from Django <http://bit.ly/1r7Eokw>`_ and changed a bit.
[ "Organizes", "multiple", "extensions", "that", "are", "separated", "with", "commas", "or", "passed", "by", "using", "--", "extension", "/", "-", "e", "multiple", "times", ".", "Note", "that", "the", ".", "py", "extension", "is", "ignored", "here", "because",...
train
https://github.com/oblalex/verboselib/blob/3c108bef060b091e1f7c08861ab07672c87ddcff/verboselib/management/utils.py#L50-L82
oblalex/verboselib
verboselib/management/utils.py
popen_wrapper
def popen_wrapper(args): """ Friendly wrapper around Popen. Returns stdout output, stderr output and OS status code. """ try: p = Popen(args, shell=False, stdout=PIPE, stderr=PIPE, close_fds=os.name != 'nt', universal_newlines=True) except OSError as e: raise OSError( "Error executing '{:}': '{:}'".format(args[0], e.strerror)) output, errors = p.communicate() return ( output, text_type(errors), p.returncode )
python
def popen_wrapper(args): """ Friendly wrapper around Popen. Returns stdout output, stderr output and OS status code. """ try: p = Popen(args, shell=False, stdout=PIPE, stderr=PIPE, close_fds=os.name != 'nt', universal_newlines=True) except OSError as e: raise OSError( "Error executing '{:}': '{:}'".format(args[0], e.strerror)) output, errors = p.communicate() return ( output, text_type(errors), p.returncode )
[ "def", "popen_wrapper", "(", "args", ")", ":", "try", ":", "p", "=", "Popen", "(", "args", ",", "shell", "=", "False", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ",", "close_fds", "=", "os", ".", "name", "!=", "'nt'", ",", "universal_n...
Friendly wrapper around Popen. Returns stdout output, stderr output and OS status code.
[ "Friendly", "wrapper", "around", "Popen", "." ]
train
https://github.com/oblalex/verboselib/blob/3c108bef060b091e1f7c08861ab07672c87ddcff/verboselib/management/utils.py#L85-L106
walchko/update
update/update.py
cmd
def cmd(str, print_ret=False, usr_pwd=None, run=True): """ Executes a command and throws an exception on error. in: str - command print_ret - print command return usr_pwd - execute command as another user (user_name, password) run - really execute command? out: returns the command output """ if usr_pwd: str = 'echo {} | sudo -u {} {} '.format(usr_pwd[1], usr_pwd[0], str) print(' [>] {}'.format(str)) if run: err, ret = commands.getstatusoutput(str) else: err = None ret = None if err: print(' [x] {}'.format(ret)) raise Exception(ret) if ret and print_ret: lines = ret.split('\n') for line in lines: print(' [<] {}'.format(line)) return ret
python
def cmd(str, print_ret=False, usr_pwd=None, run=True): """ Executes a command and throws an exception on error. in: str - command print_ret - print command return usr_pwd - execute command as another user (user_name, password) run - really execute command? out: returns the command output """ if usr_pwd: str = 'echo {} | sudo -u {} {} '.format(usr_pwd[1], usr_pwd[0], str) print(' [>] {}'.format(str)) if run: err, ret = commands.getstatusoutput(str) else: err = None ret = None if err: print(' [x] {}'.format(ret)) raise Exception(ret) if ret and print_ret: lines = ret.split('\n') for line in lines: print(' [<] {}'.format(line)) return ret
[ "def", "cmd", "(", "str", ",", "print_ret", "=", "False", ",", "usr_pwd", "=", "None", ",", "run", "=", "True", ")", ":", "if", "usr_pwd", ":", "str", "=", "'echo {} | sudo -u {} {} '", ".", "format", "(", "usr_pwd", "[", "1", "]", ",", "usr_pwd", "[...
Executes a command and throws an exception on error. in: str - command print_ret - print command return usr_pwd - execute command as another user (user_name, password) run - really execute command? out: returns the command output
[ "Executes", "a", "command", "and", "throws", "an", "exception", "on", "error", ".", "in", ":", "str", "-", "command", "print_ret", "-", "print", "command", "return", "usr_pwd", "-", "execute", "command", "as", "another", "user", "(", "user_name", "password",...
train
https://github.com/walchko/update/blob/1619593571a091893a66fbd2d34f87382d9a03d5/update/update.py#L27-L56
walchko/update
update/update.py
getPackages
def getPackages(plist): """ Cleans up input from the command line tool and returns a list of package names """ nlist = plist.split('\n') pkgs = [] for i in nlist: if i.find('===') > 0: continue pkg = i.split()[0] if pkg == 'Warning:': continue elif pkg == 'Could': continue elif pkg == 'Some': continue elif pkg == 'You': continue elif not pkg: continue pkgs.append(pkg) print(' >> Found', len(pkgs), 'packages') return pkgs
python
def getPackages(plist): """ Cleans up input from the command line tool and returns a list of package names """ nlist = plist.split('\n') pkgs = [] for i in nlist: if i.find('===') > 0: continue pkg = i.split()[0] if pkg == 'Warning:': continue elif pkg == 'Could': continue elif pkg == 'Some': continue elif pkg == 'You': continue elif not pkg: continue pkgs.append(pkg) print(' >> Found', len(pkgs), 'packages') return pkgs
[ "def", "getPackages", "(", "plist", ")", ":", "nlist", "=", "plist", ".", "split", "(", "'\\n'", ")", "pkgs", "=", "[", "]", "for", "i", "in", "nlist", ":", "if", "i", ".", "find", "(", "'==='", ")", ">", "0", ":", "continue", "pkg", "=", "i", ...
Cleans up input from the command line tool and returns a list of package names
[ "Cleans", "up", "input", "from", "the", "command", "line", "tool", "and", "returns", "a", "list", "of", "package", "names" ]
train
https://github.com/walchko/update/blob/1619593571a091893a66fbd2d34f87382d9a03d5/update/update.py#L59-L78
walchko/update
update/update.py
pip
def pip(usr_pswd=None): """ This updates one package at a time. Could do all at once: pip list --outdated | cut -d' ' -f1 | xargs pip install --upgrade """ # see if pip is installed try: cmd('which pip') except: return print('-[pip]----------') p = cmd('pip list --outdated') if not p: return pkgs = getPackages(p) # update pip and setuptools first for i, p in enumerate(pkgs): if p in ['pip', 'setuptools']: cmd('pip install -U ' + p, usr_pwd=usr_pswd, run=global_run) pkgs.pop(i) # update the rest of them for p in pkgs: cmd('pip install -U ' + p, usr_pwd=usr_pswd, run=global_run)
python
def pip(usr_pswd=None): """ This updates one package at a time. Could do all at once: pip list --outdated | cut -d' ' -f1 | xargs pip install --upgrade """ # see if pip is installed try: cmd('which pip') except: return print('-[pip]----------') p = cmd('pip list --outdated') if not p: return pkgs = getPackages(p) # update pip and setuptools first for i, p in enumerate(pkgs): if p in ['pip', 'setuptools']: cmd('pip install -U ' + p, usr_pwd=usr_pswd, run=global_run) pkgs.pop(i) # update the rest of them for p in pkgs: cmd('pip install -U ' + p, usr_pwd=usr_pswd, run=global_run)
[ "def", "pip", "(", "usr_pswd", "=", "None", ")", ":", "# see if pip is installed", "try", ":", "cmd", "(", "'which pip'", ")", "except", ":", "return", "print", "(", "'-[pip]----------'", ")", "p", "=", "cmd", "(", "'pip list --outdated'", ")", "if", "not", ...
This updates one package at a time. Could do all at once: pip list --outdated | cut -d' ' -f1 | xargs pip install --upgrade
[ "This", "updates", "one", "package", "at", "a", "time", "." ]
train
https://github.com/walchko/update/blob/1619593571a091893a66fbd2d34f87382d9a03d5/update/update.py#L81-L106
walchko/update
update/update.py
brew
def brew(clean=False): """ Handle homebrew on macOS """ # see if homebrew is installed try: cmd('which brew') except: return print('-[brew]----------') cmd('brew update') p = cmd('brew outdated') if not p: return pkgs = getPackages(p) for p in pkgs: cmd('brew upgrade {}'.format(p), run=global_run) if clean: print(' > brew prune old sym links and cleanup') cmd('brew prune') cmd('brew cleanup')
python
def brew(clean=False): """ Handle homebrew on macOS """ # see if homebrew is installed try: cmd('which brew') except: return print('-[brew]----------') cmd('brew update') p = cmd('brew outdated') if not p: return pkgs = getPackages(p) for p in pkgs: cmd('brew upgrade {}'.format(p), run=global_run) if clean: print(' > brew prune old sym links and cleanup') cmd('brew prune') cmd('brew cleanup')
[ "def", "brew", "(", "clean", "=", "False", ")", ":", "# see if homebrew is installed", "try", ":", "cmd", "(", "'which brew'", ")", "except", ":", "return", "print", "(", "'-[brew]----------'", ")", "cmd", "(", "'brew update'", ")", "p", "=", "cmd", "(", "...
Handle homebrew on macOS
[ "Handle", "homebrew", "on", "macOS" ]
train
https://github.com/walchko/update/blob/1619593571a091893a66fbd2d34f87382d9a03d5/update/update.py#L109-L129
walchko/update
update/update.py
kernel
def kernel(): """ Handle linux kernel update """ print('================================') print(' WARNING: upgrading the kernel') print('================================') time.sleep(5) print('-[kernel]----------') cmd('rpi-update', True) print(' >> You MUST reboot to load the new kernel <<')
python
def kernel(): """ Handle linux kernel update """ print('================================') print(' WARNING: upgrading the kernel') print('================================') time.sleep(5) print('-[kernel]----------') cmd('rpi-update', True) print(' >> You MUST reboot to load the new kernel <<')
[ "def", "kernel", "(", ")", ":", "print", "(", "'================================'", ")", "print", "(", "' WARNING: upgrading the kernel'", ")", "print", "(", "'================================'", ")", "time", ".", "sleep", "(", "5", ")", "print", "(", "'-[kernel]--...
Handle linux kernel update
[ "Handle", "linux", "kernel", "update" ]
train
https://github.com/walchko/update/blob/1619593571a091893a66fbd2d34f87382d9a03d5/update/update.py#L132-L143
walchko/update
update/update.py
npm
def npm(usr_pwd=None, clean=False): """ Handle npm for Node.js """ # see if node is installed try: cmd('which npm') except: return print('-[npm]----------') # awk, ignore 1st line and grab 1st word p = cmd("npm outdated -g | awk 'NR>1 {print $1}'") if not p: return pkgs = getPackages(p) for p in pkgs: cmd('{} {}'.format('npm update -g ', p), usr_pwd=usr_pwd, run=global_run)
python
def npm(usr_pwd=None, clean=False): """ Handle npm for Node.js """ # see if node is installed try: cmd('which npm') except: return print('-[npm]----------') # awk, ignore 1st line and grab 1st word p = cmd("npm outdated -g | awk 'NR>1 {print $1}'") if not p: return pkgs = getPackages(p) for p in pkgs: cmd('{} {}'.format('npm update -g ', p), usr_pwd=usr_pwd, run=global_run)
[ "def", "npm", "(", "usr_pwd", "=", "None", ",", "clean", "=", "False", ")", ":", "# see if node is installed", "try", ":", "cmd", "(", "'which npm'", ")", "except", ":", "return", "print", "(", "'-[npm]----------'", ")", "# awk, ignore 1st line and grab 1st word",...
Handle npm for Node.js
[ "Handle", "npm", "for", "Node", ".", "js" ]
train
https://github.com/walchko/update/blob/1619593571a091893a66fbd2d34f87382d9a03d5/update/update.py#L157-L173
cyberdelia/astrolabe
astrolabe/interval.py
Interval.split
def split(self): """Immediately stop the current interval and start a new interval that has a start_instant equivalent to the stop_interval of self""" self.stop() interval = Interval() interval._start_instant = self.stop_instant return interval
python
def split(self): """Immediately stop the current interval and start a new interval that has a start_instant equivalent to the stop_interval of self""" self.stop() interval = Interval() interval._start_instant = self.stop_instant return interval
[ "def", "split", "(", "self", ")", ":", "self", ".", "stop", "(", ")", "interval", "=", "Interval", "(", ")", "interval", ".", "_start_instant", "=", "self", ".", "stop_instant", "return", "interval" ]
Immediately stop the current interval and start a new interval that has a start_instant equivalent to the stop_interval of self
[ "Immediately", "stop", "the", "current", "interval", "and", "start", "a", "new", "interval", "that", "has", "a", "start_instant", "equivalent", "to", "the", "stop_interval", "of", "self" ]
train
https://github.com/cyberdelia/astrolabe/blob/c8496d330fd6fd6c7bb8f9912b684519ccb5c84e/astrolabe/interval.py#L47-L53
cyberdelia/astrolabe
astrolabe/interval.py
Interval.stop
def stop(self): """Mark the stop of the interval. Calling stop on an already stopped interval has no effect. An interval can only be stopped once. :returns: the duration if the interval is truely stopped otherwise ``False``. """ if self._start_instant is None: raise IntervalException("Attempt to stop an interval that has not started.") if self._stop_instant is None: self._stop_instant = instant() self._duration = int((self._stop_instant - self._start_instant) * 1000) return self._duration return False
python
def stop(self): """Mark the stop of the interval. Calling stop on an already stopped interval has no effect. An interval can only be stopped once. :returns: the duration if the interval is truely stopped otherwise ``False``. """ if self._start_instant is None: raise IntervalException("Attempt to stop an interval that has not started.") if self._stop_instant is None: self._stop_instant = instant() self._duration = int((self._stop_instant - self._start_instant) * 1000) return self._duration return False
[ "def", "stop", "(", "self", ")", ":", "if", "self", ".", "_start_instant", "is", "None", ":", "raise", "IntervalException", "(", "\"Attempt to stop an interval that has not started.\"", ")", "if", "self", ".", "_stop_instant", "is", "None", ":", "self", ".", "_s...
Mark the stop of the interval. Calling stop on an already stopped interval has no effect. An interval can only be stopped once. :returns: the duration if the interval is truely stopped otherwise ``False``.
[ "Mark", "the", "stop", "of", "the", "interval", "." ]
train
https://github.com/cyberdelia/astrolabe/blob/c8496d330fd6fd6c7bb8f9912b684519ccb5c84e/astrolabe/interval.py#L68-L82
cyberdelia/astrolabe
astrolabe/interval.py
Interval.duration_so_far
def duration_so_far(self): """Return how the duration so far. :returns: the duration from the time the Interval was started if the interval is running, otherwise ``False``. """ if self._start_instant is None: return False if self._stop_instant is None: return int((instant() - self._start_instant) * 1000) return False
python
def duration_so_far(self): """Return how the duration so far. :returns: the duration from the time the Interval was started if the interval is running, otherwise ``False``. """ if self._start_instant is None: return False if self._stop_instant is None: return int((instant() - self._start_instant) * 1000) return False
[ "def", "duration_so_far", "(", "self", ")", ":", "if", "self", ".", "_start_instant", "is", "None", ":", "return", "False", "if", "self", ".", "_stop_instant", "is", "None", ":", "return", "int", "(", "(", "instant", "(", ")", "-", "self", ".", "_start...
Return how the duration so far. :returns: the duration from the time the Interval was started if the interval is running, otherwise ``False``.
[ "Return", "how", "the", "duration", "so", "far", "." ]
train
https://github.com/cyberdelia/astrolabe/blob/c8496d330fd6fd6c7bb8f9912b684519ccb5c84e/astrolabe/interval.py#L85-L95
cyberdelia/astrolabe
astrolabe/interval.py
Interval.duration
def duration(self): """Returns the integer value of the interval, the value is in milliseconds. If the interval has not had stop called yet, it will report the number of milliseconds in the interval up to the current point in time. """ if self._stop_instant is None: return int((instant() - self._start_instant) * 1000) if self._duration is None: self._duration = int((self._stop_instant - self._start_instant) * 1000) return self._duration
python
def duration(self): """Returns the integer value of the interval, the value is in milliseconds. If the interval has not had stop called yet, it will report the number of milliseconds in the interval up to the current point in time. """ if self._stop_instant is None: return int((instant() - self._start_instant) * 1000) if self._duration is None: self._duration = int((self._stop_instant - self._start_instant) * 1000) return self._duration
[ "def", "duration", "(", "self", ")", ":", "if", "self", ".", "_stop_instant", "is", "None", ":", "return", "int", "(", "(", "instant", "(", ")", "-", "self", ".", "_start_instant", ")", "*", "1000", ")", "if", "self", ".", "_duration", "is", "None", ...
Returns the integer value of the interval, the value is in milliseconds. If the interval has not had stop called yet, it will report the number of milliseconds in the interval up to the current point in time.
[ "Returns", "the", "integer", "value", "of", "the", "interval", "the", "value", "is", "in", "milliseconds", "." ]
train
https://github.com/cyberdelia/astrolabe/blob/c8496d330fd6fd6c7bb8f9912b684519ccb5c84e/astrolabe/interval.py#L134-L144
colecrtr/django-naguine
naguine/core/fork.py
Fork.__get_preferential_value
def __get_preferential_value(self, paths, index=0): """ Returns the preferential path's value. Preferential path being the first keyword (a.k.a. path) found in the path order list created on instantiation. """ try: value = paths[self.path_preference_order[index]] except KeyError: value = self.__get_preferential_value(paths, (index + 1)) except IndexError: msg = ('Cannot fork to any of the provided path\'s values. ' 'Perhaps add a fallback path (set to `True`) in your ' 'fork\'s instantiation?') raise self.PathNotAvailable(msg) return value
python
def __get_preferential_value(self, paths, index=0): """ Returns the preferential path's value. Preferential path being the first keyword (a.k.a. path) found in the path order list created on instantiation. """ try: value = paths[self.path_preference_order[index]] except KeyError: value = self.__get_preferential_value(paths, (index + 1)) except IndexError: msg = ('Cannot fork to any of the provided path\'s values. ' 'Perhaps add a fallback path (set to `True`) in your ' 'fork\'s instantiation?') raise self.PathNotAvailable(msg) return value
[ "def", "__get_preferential_value", "(", "self", ",", "paths", ",", "index", "=", "0", ")", ":", "try", ":", "value", "=", "paths", "[", "self", ".", "path_preference_order", "[", "index", "]", "]", "except", "KeyError", ":", "value", "=", "self", ".", ...
Returns the preferential path's value. Preferential path being the first keyword (a.k.a. path) found in the path order list created on instantiation.
[ "Returns", "the", "preferential", "path", "s", "value", ".", "Preferential", "path", "being", "the", "first", "keyword", "(", "a", ".", "k", ".", "a", ".", "path", ")", "found", "in", "the", "path", "order", "list", "created", "on", "instantiation", "." ...
train
https://github.com/colecrtr/django-naguine/blob/984da05dec15a4139788831e7fc060c2b7cb7fd3/naguine/core/fork.py#L58-L75
kahowell/pywebui
bridge/pywebui/bridge/__init__.py
Bridge.import_module
def import_module(self, name): """Import a module into the bridge.""" if name not in self._objects: module = _import_module(name) self._objects[name] = module self._object_references[id(module)] = name return self._objects[name]
python
def import_module(self, name): """Import a module into the bridge.""" if name not in self._objects: module = _import_module(name) self._objects[name] = module self._object_references[id(module)] = name return self._objects[name]
[ "def", "import_module", "(", "self", ",", "name", ")", ":", "if", "name", "not", "in", "self", ".", "_objects", ":", "module", "=", "_import_module", "(", "name", ")", "self", ".", "_objects", "[", "name", "]", "=", "module", "self", ".", "_object_refe...
Import a module into the bridge.
[ "Import", "a", "module", "into", "the", "bridge", "." ]
train
https://github.com/kahowell/pywebui/blob/91c031e784454a83bea6808517e6de4389ca6ffa/bridge/pywebui/bridge/__init__.py#L27-L33
concordusapps/alchemist
alchemist/db/model.py
_component_of
def _component_of(name): """Get the root package or module of the passed module. """ # Get the registered package this model belongs to. segments = name.split('.') while segments: # Is this name a registered package? test = '.'.join(segments) if test in settings.get('COMPONENTS', []): # This is the component we are in. return test # Remove the right-most segment. segments.pop() if not segments and '.models' in name: # No package was found to be registered; attempt to guess the # right package name; strip all occurrances of '.models' from the # pacakge name. return _component_of(name.replace('.models', ''))
python
def _component_of(name): """Get the root package or module of the passed module. """ # Get the registered package this model belongs to. segments = name.split('.') while segments: # Is this name a registered package? test = '.'.join(segments) if test in settings.get('COMPONENTS', []): # This is the component we are in. return test # Remove the right-most segment. segments.pop() if not segments and '.models' in name: # No package was found to be registered; attempt to guess the # right package name; strip all occurrances of '.models' from the # pacakge name. return _component_of(name.replace('.models', ''))
[ "def", "_component_of", "(", "name", ")", ":", "# Get the registered package this model belongs to.", "segments", "=", "name", ".", "split", "(", "'.'", ")", "while", "segments", ":", "# Is this name a registered package?", "test", "=", "'.'", ".", "join", "(", "seg...
Get the root package or module of the passed module.
[ "Get", "the", "root", "package", "or", "module", "of", "the", "passed", "module", "." ]
train
https://github.com/concordusapps/alchemist/blob/822571366271b5dca0ac8bf41df988c6a3b61432/alchemist/db/model.py#L17-L37
concordusapps/alchemist
alchemist/db/model.py
Model.save
def save(self, commit=False): """Save the changes to the model. If the model has not been persisted then it adds the model to the declared session. Then it flushes the object session and optionally commits it. """ if not has_identity(self): # Object has not been persisted to the database. session.add(self) if commit: # Commit the session as requested. session.commit() else: # Just flush the session; do not commit. session.flush()
python
def save(self, commit=False): """Save the changes to the model. If the model has not been persisted then it adds the model to the declared session. Then it flushes the object session and optionally commits it. """ if not has_identity(self): # Object has not been persisted to the database. session.add(self) if commit: # Commit the session as requested. session.commit() else: # Just flush the session; do not commit. session.flush()
[ "def", "save", "(", "self", ",", "commit", "=", "False", ")", ":", "if", "not", "has_identity", "(", "self", ")", ":", "# Object has not been persisted to the database.", "session", ".", "add", "(", "self", ")", "if", "commit", ":", "# Commit the session as requ...
Save the changes to the model. If the model has not been persisted then it adds the model to the declared session. Then it flushes the object session and optionally commits it.
[ "Save", "the", "changes", "to", "the", "model", "." ]
train
https://github.com/concordusapps/alchemist/blob/822571366271b5dca0ac8bf41df988c6a3b61432/alchemist/db/model.py#L211-L228
jbrudvik/yahooscraper
yahooscraper/fantasy/team.py
url
def url(sport, league_id, team_id, start_date=None): """ Given sport name, league_id, team_id, and optional start date (YYYY-MM-DD), return the url for the fantasy team page for that date (default: today) """ url = '%s/%s/%s/' % (SPORT_URLS[sport], league_id, team_id) if start_date is not None: url += 'team?&date=%s' % start_date return url
python
def url(sport, league_id, team_id, start_date=None): """ Given sport name, league_id, team_id, and optional start date (YYYY-MM-DD), return the url for the fantasy team page for that date (default: today) """ url = '%s/%s/%s/' % (SPORT_URLS[sport], league_id, team_id) if start_date is not None: url += 'team?&date=%s' % start_date return url
[ "def", "url", "(", "sport", ",", "league_id", ",", "team_id", ",", "start_date", "=", "None", ")", ":", "url", "=", "'%s/%s/%s/'", "%", "(", "SPORT_URLS", "[", "sport", "]", ",", "league_id", ",", "team_id", ")", "if", "start_date", "is", "not", "None"...
Given sport name, league_id, team_id, and optional start date (YYYY-MM-DD), return the url for the fantasy team page for that date (default: today)
[ "Given", "sport", "name", "league_id", "team_id", "and", "optional", "start", "date", "(", "YYYY", "-", "MM", "-", "DD", ")", "return", "the", "url", "for", "the", "fantasy", "team", "page", "for", "that", "date", "(", "default", ":", "today", ")" ]
train
https://github.com/jbrudvik/yahooscraper/blob/e880323fea0dd25f03410eea9d088760ba7c3528/yahooscraper/fantasy/team.py#L14-L22
jbrudvik/yahooscraper
yahooscraper/fantasy/team.py
team
def team(page): """ Return the team name """ soup = BeautifulSoup(page) try: return soup.find('title').text.split(' | ')[0].split(' - ')[1] except: return None
python
def team(page): """ Return the team name """ soup = BeautifulSoup(page) try: return soup.find('title').text.split(' | ')[0].split(' - ')[1] except: return None
[ "def", "team", "(", "page", ")", ":", "soup", "=", "BeautifulSoup", "(", "page", ")", "try", ":", "return", "soup", ".", "find", "(", "'title'", ")", ".", "text", ".", "split", "(", "' | '", ")", "[", "0", "]", ".", "split", "(", "' - '", ")", ...
Return the team name
[ "Return", "the", "team", "name" ]
train
https://github.com/jbrudvik/yahooscraper/blob/e880323fea0dd25f03410eea9d088760ba7c3528/yahooscraper/fantasy/team.py#L25-L33
jbrudvik/yahooscraper
yahooscraper/fantasy/team.py
league
def league(page): """ Return the league name """ soup = BeautifulSoup(page) try: return soup.find('title').text.split(' | ')[0].split(' - ')[0] except: return None
python
def league(page): """ Return the league name """ soup = BeautifulSoup(page) try: return soup.find('title').text.split(' | ')[0].split(' - ')[0] except: return None
[ "def", "league", "(", "page", ")", ":", "soup", "=", "BeautifulSoup", "(", "page", ")", "try", ":", "return", "soup", ".", "find", "(", "'title'", ")", ".", "text", ".", "split", "(", "' | '", ")", "[", "0", "]", ".", "split", "(", "' - '", ")", ...
Return the league name
[ "Return", "the", "league", "name" ]
train
https://github.com/jbrudvik/yahooscraper/blob/e880323fea0dd25f03410eea9d088760ba7c3528/yahooscraper/fantasy/team.py#L36-L44
jbrudvik/yahooscraper
yahooscraper/fantasy/team.py
date
def date(page): """ Return the date, nicely-formatted """ soup = BeautifulSoup(page) try: page_date = soup.find('input', attrs={'name': 'date'})['value'] parsed_date = datetime.strptime(page_date, '%Y-%m-%d') return parsed_date.strftime('%a, %b %d, %Y') except: return None
python
def date(page): """ Return the date, nicely-formatted """ soup = BeautifulSoup(page) try: page_date = soup.find('input', attrs={'name': 'date'})['value'] parsed_date = datetime.strptime(page_date, '%Y-%m-%d') return parsed_date.strftime('%a, %b %d, %Y') except: return None
[ "def", "date", "(", "page", ")", ":", "soup", "=", "BeautifulSoup", "(", "page", ")", "try", ":", "page_date", "=", "soup", ".", "find", "(", "'input'", ",", "attrs", "=", "{", "'name'", ":", "'date'", "}", ")", "[", "'value'", "]", "parsed_date", ...
Return the date, nicely-formatted
[ "Return", "the", "date", "nicely", "-", "formatted" ]
train
https://github.com/jbrudvik/yahooscraper/blob/e880323fea0dd25f03410eea9d088760ba7c3528/yahooscraper/fantasy/team.py#L47-L57
jbrudvik/yahooscraper
yahooscraper/fantasy/team.py
alternates
def alternates(page): """ Return iterable containing players on bench who are available to play, where each player is a dict containing: - name - details - opponent """ soup = BeautifulSoup(page) try: bench = soup.find_all('tr', class_='bench') bench_bios = [p.find('div', class_='ysf-player-name') for p in bench] names = [p.find('a').text for p in bench_bios] details = [p.find('span').text for p in bench_bios] opponents = [p.find_all('td', recursive=False)[3].text for p in bench] players = [{'name': n, 'details': d, 'opponent': o} for (n, d, o) in zip(names, details, opponents)] return [p for p in players if len(p['opponent']) > 0] except: return None
python
def alternates(page): """ Return iterable containing players on bench who are available to play, where each player is a dict containing: - name - details - opponent """ soup = BeautifulSoup(page) try: bench = soup.find_all('tr', class_='bench') bench_bios = [p.find('div', class_='ysf-player-name') for p in bench] names = [p.find('a').text for p in bench_bios] details = [p.find('span').text for p in bench_bios] opponents = [p.find_all('td', recursive=False)[3].text for p in bench] players = [{'name': n, 'details': d, 'opponent': o} for (n, d, o) in zip(names, details, opponents)] return [p for p in players if len(p['opponent']) > 0] except: return None
[ "def", "alternates", "(", "page", ")", ":", "soup", "=", "BeautifulSoup", "(", "page", ")", "try", ":", "bench", "=", "soup", ".", "find_all", "(", "'tr'", ",", "class_", "=", "'bench'", ")", "bench_bios", "=", "[", "p", ".", "find", "(", "'div'", ...
Return iterable containing players on bench who are available to play, where each player is a dict containing: - name - details - opponent
[ "Return", "iterable", "containing", "players", "on", "bench", "who", "are", "available", "to", "play", "where", "each", "player", "is", "a", "dict", "containing", ":" ]
train
https://github.com/jbrudvik/yahooscraper/blob/e880323fea0dd25f03410eea9d088760ba7c3528/yahooscraper/fantasy/team.py#L60-L80
jbrudvik/yahooscraper
yahooscraper/fantasy/team.py
start_active_players_path
def start_active_players_path(page): """ Return the path in the "Start Active Players" button """ soup = BeautifulSoup(page) try: return soup.find('a', href=True, text='Start Active Players')['href'] except: return None
python
def start_active_players_path(page): """ Return the path in the "Start Active Players" button """ soup = BeautifulSoup(page) try: return soup.find('a', href=True, text='Start Active Players')['href'] except: return None
[ "def", "start_active_players_path", "(", "page", ")", ":", "soup", "=", "BeautifulSoup", "(", "page", ")", "try", ":", "return", "soup", ".", "find", "(", "'a'", ",", "href", "=", "True", ",", "text", "=", "'Start Active Players'", ")", "[", "'href'", "]...
Return the path in the "Start Active Players" button
[ "Return", "the", "path", "in", "the", "Start", "Active", "Players", "button" ]
train
https://github.com/jbrudvik/yahooscraper/blob/e880323fea0dd25f03410eea9d088760ba7c3528/yahooscraper/fantasy/team.py#L83-L91
Synerty/peek-plugin-base
peek_plugin_base/server/PluginServerStorageEntryHookABC.py
PluginServerStorageEntryHookABC._migrateStorageSchema
def _migrateStorageSchema(self, metadata: MetaData) -> None: """ Initialise the DB This method is called by the platform between the load() and start() calls. There should be no need for a plugin to call this method it's self. :param metadata: the SQLAlchemy metadata for this plugins schema """ relDir = self._packageCfg.config.storage.alembicDir(require_string) alembicDir = os.path.join(self.rootDir, relDir) if not os.path.isdir(alembicDir): raise NotADirectoryError(alembicDir) self._dbConn = DbConnection( dbConnectString=self.platform.dbConnectString, metadata=metadata, alembicDir=alembicDir, enableCreateAll=False ) self._dbConn.migrate()
python
def _migrateStorageSchema(self, metadata: MetaData) -> None: """ Initialise the DB This method is called by the platform between the load() and start() calls. There should be no need for a plugin to call this method it's self. :param metadata: the SQLAlchemy metadata for this plugins schema """ relDir = self._packageCfg.config.storage.alembicDir(require_string) alembicDir = os.path.join(self.rootDir, relDir) if not os.path.isdir(alembicDir): raise NotADirectoryError(alembicDir) self._dbConn = DbConnection( dbConnectString=self.platform.dbConnectString, metadata=metadata, alembicDir=alembicDir, enableCreateAll=False ) self._dbConn.migrate()
[ "def", "_migrateStorageSchema", "(", "self", ",", "metadata", ":", "MetaData", ")", "->", "None", ":", "relDir", "=", "self", ".", "_packageCfg", ".", "config", ".", "storage", ".", "alembicDir", "(", "require_string", ")", "alembicDir", "=", "os", ".", "p...
Initialise the DB This method is called by the platform between the load() and start() calls. There should be no need for a plugin to call this method it's self. :param metadata: the SQLAlchemy metadata for this plugins schema
[ "Initialise", "the", "DB" ]
train
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/server/PluginServerStorageEntryHookABC.py#L15-L36
Synerty/peek-plugin-base
peek_plugin_base/server/PluginServerStorageEntryHookABC.py
PluginServerStorageEntryHookABC.prefetchDeclarativeIds
def prefetchDeclarativeIds(self, Declarative, count) -> Deferred: """ Get PG Sequence Generator A PostGreSQL sequence generator returns a chunk of IDs for the given declarative. :return: A generator that will provide the IDs :rtype: an iterator, yielding the numbers to assign """ return self._dbConn.prefetchDeclarativeIds(Declarative=Declarative, count=count)
python
def prefetchDeclarativeIds(self, Declarative, count) -> Deferred: """ Get PG Sequence Generator A PostGreSQL sequence generator returns a chunk of IDs for the given declarative. :return: A generator that will provide the IDs :rtype: an iterator, yielding the numbers to assign """ return self._dbConn.prefetchDeclarativeIds(Declarative=Declarative, count=count)
[ "def", "prefetchDeclarativeIds", "(", "self", ",", "Declarative", ",", "count", ")", "->", "Deferred", ":", "return", "self", ".", "_dbConn", ".", "prefetchDeclarativeIds", "(", "Declarative", "=", "Declarative", ",", "count", "=", "count", ")" ]
Get PG Sequence Generator A PostGreSQL sequence generator returns a chunk of IDs for the given declarative. :return: A generator that will provide the IDs :rtype: an iterator, yielding the numbers to assign
[ "Get", "PG", "Sequence", "Generator" ]
train
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/server/PluginServerStorageEntryHookABC.py#L62-L72
etcher-be/epab
epab/utils/_run_once.py
run_once
def run_once(func): """ Simple decorator to ensure a function is ran only once """ def _inner(*args, **kwargs): if func.__name__ in CTX.run_once: LOGGER.info('skipping %s', func.__name__) return CTX.run_once[func.__name__] LOGGER.info('running: %s', func.__name__) result = func(*args, **kwargs) CTX.run_once[func.__name__] = result return result return _inner
python
def run_once(func): """ Simple decorator to ensure a function is ran only once """ def _inner(*args, **kwargs): if func.__name__ in CTX.run_once: LOGGER.info('skipping %s', func.__name__) return CTX.run_once[func.__name__] LOGGER.info('running: %s', func.__name__) result = func(*args, **kwargs) CTX.run_once[func.__name__] = result return result return _inner
[ "def", "run_once", "(", "func", ")", ":", "def", "_inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "func", ".", "__name__", "in", "CTX", ".", "run_once", ":", "LOGGER", ".", "info", "(", "'skipping %s'", ",", "func", ".", "__name...
Simple decorator to ensure a function is ran only once
[ "Simple", "decorator", "to", "ensure", "a", "function", "is", "ran", "only", "once" ]
train
https://github.com/etcher-be/epab/blob/024cde74d058281aa66e6e4b7b71dccbe803b1c1/epab/utils/_run_once.py#L13-L28
concordusapps/alchemist
alchemist/management.py
Manager.run
def run(self, commands=None, default_command=None, context=None): """ Context: A dict of namespaces as the key, and their objects as the value. Used to easily inject code into the shell's runtime env. """ if commands: self._commands.update(commands) # HACK: Overriding the old shell isn't cool. # Should do it by default. from alchemist.commands import Shell self._commands['shell'] = Shell(context=context) if default_command is not None and len(sys.argv) == 1: sys.argv.append(default_command) try: result = self.handle(sys.argv[0], sys.argv[1:]) except SystemExit as e: result = e.code sys.exit(result or 0)
python
def run(self, commands=None, default_command=None, context=None): """ Context: A dict of namespaces as the key, and their objects as the value. Used to easily inject code into the shell's runtime env. """ if commands: self._commands.update(commands) # HACK: Overriding the old shell isn't cool. # Should do it by default. from alchemist.commands import Shell self._commands['shell'] = Shell(context=context) if default_command is not None and len(sys.argv) == 1: sys.argv.append(default_command) try: result = self.handle(sys.argv[0], sys.argv[1:]) except SystemExit as e: result = e.code sys.exit(result or 0)
[ "def", "run", "(", "self", ",", "commands", "=", "None", ",", "default_command", "=", "None", ",", "context", "=", "None", ")", ":", "if", "commands", ":", "self", ".", "_commands", ".", "update", "(", "commands", ")", "# HACK: Overriding the old shell isn't...
Context: A dict of namespaces as the key, and their objects as the value. Used to easily inject code into the shell's runtime env.
[ "Context", ":", "A", "dict", "of", "namespaces", "as", "the", "key", "and", "their", "objects", "as", "the", "value", ".", "Used", "to", "easily", "inject", "code", "into", "the", "shell", "s", "runtime", "env", "." ]
train
https://github.com/concordusapps/alchemist/blob/822571366271b5dca0ac8bf41df988c6a3b61432/alchemist/management.py#L33-L55
villebro/pyhtzee
pyhtzee/scoring.py
score_x_of_a_kind_yahtzee
def score_x_of_a_kind_yahtzee(dice: List[int], min_same_faces: int) -> int: """Return sum of dice if there are a minimum of equal min_same_faces dice, otherwise return zero. Only works for 3 or more min_same_faces. """ for die, count in Counter(dice).most_common(1): if count >= min_same_faces: return sum(dice) return 0
python
def score_x_of_a_kind_yahtzee(dice: List[int], min_same_faces: int) -> int: """Return sum of dice if there are a minimum of equal min_same_faces dice, otherwise return zero. Only works for 3 or more min_same_faces. """ for die, count in Counter(dice).most_common(1): if count >= min_same_faces: return sum(dice) return 0
[ "def", "score_x_of_a_kind_yahtzee", "(", "dice", ":", "List", "[", "int", "]", ",", "min_same_faces", ":", "int", ")", "->", "int", ":", "for", "die", ",", "count", "in", "Counter", "(", "dice", ")", ".", "most_common", "(", "1", ")", ":", "if", "cou...
Return sum of dice if there are a minimum of equal min_same_faces dice, otherwise return zero. Only works for 3 or more min_same_faces.
[ "Return", "sum", "of", "dice", "if", "there", "are", "a", "minimum", "of", "equal", "min_same_faces", "dice", "otherwise", "return", "zero", ".", "Only", "works", "for", "3", "or", "more", "min_same_faces", "." ]
train
https://github.com/villebro/pyhtzee/blob/92a4296325fb1aac743c00c49b537b9f0d48f041/pyhtzee/scoring.py#L39-L46
villebro/pyhtzee
pyhtzee/scoring.py
score_x_of_a_kind_yatzy
def score_x_of_a_kind_yatzy(dice: List[int], min_same_faces: int) -> int: """Similar to yahtzee, but only return the sum of the dice that satisfy min_same_faces """ for die, count in Counter(dice).most_common(1): if count >= min_same_faces: return die * min_same_faces return 0
python
def score_x_of_a_kind_yatzy(dice: List[int], min_same_faces: int) -> int: """Similar to yahtzee, but only return the sum of the dice that satisfy min_same_faces """ for die, count in Counter(dice).most_common(1): if count >= min_same_faces: return die * min_same_faces return 0
[ "def", "score_x_of_a_kind_yatzy", "(", "dice", ":", "List", "[", "int", "]", ",", "min_same_faces", ":", "int", ")", "->", "int", ":", "for", "die", ",", "count", "in", "Counter", "(", "dice", ")", ".", "most_common", "(", "1", ")", ":", "if", "count...
Similar to yahtzee, but only return the sum of the dice that satisfy min_same_faces
[ "Similar", "to", "yahtzee", "but", "only", "return", "the", "sum", "of", "the", "dice", "that", "satisfy", "min_same_faces" ]
train
https://github.com/villebro/pyhtzee/blob/92a4296325fb1aac743c00c49b537b9f0d48f041/pyhtzee/scoring.py#L49-L55
villebro/pyhtzee
pyhtzee/scoring.py
score_small_straight_yahztee
def score_small_straight_yahztee(dice: List[int]) -> int: """ Small straight scoring according to regular yahtzee rules """ global CONSTANT_SCORES_YAHTZEE dice_set = set(dice) if _are_two_sets_equal({1, 2, 3, 4}, dice_set) or \ _are_two_sets_equal({2, 3, 4, 5}, dice_set) or \ _are_two_sets_equal({3, 4, 5, 6}, dice_set): return CONSTANT_SCORES_YAHTZEE[Category.SMALL_STRAIGHT] return 0
python
def score_small_straight_yahztee(dice: List[int]) -> int: """ Small straight scoring according to regular yahtzee rules """ global CONSTANT_SCORES_YAHTZEE dice_set = set(dice) if _are_two_sets_equal({1, 2, 3, 4}, dice_set) or \ _are_two_sets_equal({2, 3, 4, 5}, dice_set) or \ _are_two_sets_equal({3, 4, 5, 6}, dice_set): return CONSTANT_SCORES_YAHTZEE[Category.SMALL_STRAIGHT] return 0
[ "def", "score_small_straight_yahztee", "(", "dice", ":", "List", "[", "int", "]", ")", "->", "int", ":", "global", "CONSTANT_SCORES_YAHTZEE", "dice_set", "=", "set", "(", "dice", ")", "if", "_are_two_sets_equal", "(", "{", "1", ",", "2", ",", "3", ",", "...
Small straight scoring according to regular yahtzee rules
[ "Small", "straight", "scoring", "according", "to", "regular", "yahtzee", "rules" ]
train
https://github.com/villebro/pyhtzee/blob/92a4296325fb1aac743c00c49b537b9f0d48f041/pyhtzee/scoring.py#L100-L110
villebro/pyhtzee
pyhtzee/scoring.py
score_small_straight_yatzy
def score_small_straight_yatzy(dice: List[int]) -> int: """ Small straight scoring according to yatzy rules """ dice_set = set(dice) if _are_two_sets_equal({1, 2, 3, 4, 5}, dice_set): return sum(dice) return 0
python
def score_small_straight_yatzy(dice: List[int]) -> int: """ Small straight scoring according to yatzy rules """ dice_set = set(dice) if _are_two_sets_equal({1, 2, 3, 4, 5}, dice_set): return sum(dice) return 0
[ "def", "score_small_straight_yatzy", "(", "dice", ":", "List", "[", "int", "]", ")", "->", "int", ":", "dice_set", "=", "set", "(", "dice", ")", "if", "_are_two_sets_equal", "(", "{", "1", ",", "2", ",", "3", ",", "4", ",", "5", "}", ",", "dice_set...
Small straight scoring according to yatzy rules
[ "Small", "straight", "scoring", "according", "to", "yatzy", "rules" ]
train
https://github.com/villebro/pyhtzee/blob/92a4296325fb1aac743c00c49b537b9f0d48f041/pyhtzee/scoring.py#L113-L120
villebro/pyhtzee
pyhtzee/scoring.py
score_large_straight_yahtzee
def score_large_straight_yahtzee(dice: List[int]) -> int: """ Large straight scoring according to regular yahtzee rules """ global CONSTANT_SCORES_YAHTZEE dice_set = set(dice) if _are_two_sets_equal({1, 2, 3, 4, 5}, dice_set) or \ _are_two_sets_equal({2, 3, 4, 5, 6}, dice_set): return CONSTANT_SCORES_YAHTZEE[Category.LARGE_STRAIGHT] return 0
python
def score_large_straight_yahtzee(dice: List[int]) -> int: """ Large straight scoring according to regular yahtzee rules """ global CONSTANT_SCORES_YAHTZEE dice_set = set(dice) if _are_two_sets_equal({1, 2, 3, 4, 5}, dice_set) or \ _are_two_sets_equal({2, 3, 4, 5, 6}, dice_set): return CONSTANT_SCORES_YAHTZEE[Category.LARGE_STRAIGHT] return 0
[ "def", "score_large_straight_yahtzee", "(", "dice", ":", "List", "[", "int", "]", ")", "->", "int", ":", "global", "CONSTANT_SCORES_YAHTZEE", "dice_set", "=", "set", "(", "dice", ")", "if", "_are_two_sets_equal", "(", "{", "1", ",", "2", ",", "3", ",", "...
Large straight scoring according to regular yahtzee rules
[ "Large", "straight", "scoring", "according", "to", "regular", "yahtzee", "rules" ]
train
https://github.com/villebro/pyhtzee/blob/92a4296325fb1aac743c00c49b537b9f0d48f041/pyhtzee/scoring.py#L130-L139
villebro/pyhtzee
pyhtzee/scoring.py
score_large_straight_yatzy
def score_large_straight_yatzy(dice: List[int]) -> int: """ Large straight scoring according to yatzy rules """ dice_set = set(dice) if _are_two_sets_equal({2, 3, 4, 5, 6}, dice_set): return sum(dice) return 0
python
def score_large_straight_yatzy(dice: List[int]) -> int: """ Large straight scoring according to yatzy rules """ dice_set = set(dice) if _are_two_sets_equal({2, 3, 4, 5, 6}, dice_set): return sum(dice) return 0
[ "def", "score_large_straight_yatzy", "(", "dice", ":", "List", "[", "int", "]", ")", "->", "int", ":", "dice_set", "=", "set", "(", "dice", ")", "if", "_are_two_sets_equal", "(", "{", "2", ",", "3", ",", "4", ",", "5", ",", "6", "}", ",", "dice_set...
Large straight scoring according to yatzy rules
[ "Large", "straight", "scoring", "according", "to", "yatzy", "rules" ]
train
https://github.com/villebro/pyhtzee/blob/92a4296325fb1aac743c00c49b537b9f0d48f041/pyhtzee/scoring.py#L142-L149
cloudboss/friend
friend/collections.py
select_dict
def select_dict(coll, key, value): """ Given an iterable of dictionaries, return the dictionaries where the values at a given key match the given value. If the value is an iterable of objects, the function will consider any to be a match. This is especially useful when calling REST APIs which return arrays of JSON objects. When such a response is converted to a Python list of dictionaries, it may be easily filtered using this function. :param iter coll: An iterable containing dictionaries :param obj key: A key to search in each dictionary :param value: A value or iterable of values to match :type value: obj or iter :returns: A list of dictionaries matching the query :rtype: list :Example: :: >>> dicts = [ ... {'hi': 'bye'}, ... {10: 2, 30: 4}, ... {'hi': 'hello', 'bye': 'goodbye'}, ... ] >>> select_dict(dicts, 'hi', 'bye') [{'hi': 'bye'}] >>> select_dict(dicts, 'hi', ('bye', 'hello')) [{'hi': 'bye'}, {'hi': 'hello', 'bye': 'goodbye'}] """ if getattr(value, '__iter__', None): iterable = value else: iterable = [value] return [v for v in coll if key in v and v[key] in iterable]
python
def select_dict(coll, key, value): """ Given an iterable of dictionaries, return the dictionaries where the values at a given key match the given value. If the value is an iterable of objects, the function will consider any to be a match. This is especially useful when calling REST APIs which return arrays of JSON objects. When such a response is converted to a Python list of dictionaries, it may be easily filtered using this function. :param iter coll: An iterable containing dictionaries :param obj key: A key to search in each dictionary :param value: A value or iterable of values to match :type value: obj or iter :returns: A list of dictionaries matching the query :rtype: list :Example: :: >>> dicts = [ ... {'hi': 'bye'}, ... {10: 2, 30: 4}, ... {'hi': 'hello', 'bye': 'goodbye'}, ... ] >>> select_dict(dicts, 'hi', 'bye') [{'hi': 'bye'}] >>> select_dict(dicts, 'hi', ('bye', 'hello')) [{'hi': 'bye'}, {'hi': 'hello', 'bye': 'goodbye'}] """ if getattr(value, '__iter__', None): iterable = value else: iterable = [value] return [v for v in coll if key in v and v[key] in iterable]
[ "def", "select_dict", "(", "coll", ",", "key", ",", "value", ")", ":", "if", "getattr", "(", "value", ",", "'__iter__'", ",", "None", ")", ":", "iterable", "=", "value", "else", ":", "iterable", "=", "[", "value", "]", "return", "[", "v", "for", "v...
Given an iterable of dictionaries, return the dictionaries where the values at a given key match the given value. If the value is an iterable of objects, the function will consider any to be a match. This is especially useful when calling REST APIs which return arrays of JSON objects. When such a response is converted to a Python list of dictionaries, it may be easily filtered using this function. :param iter coll: An iterable containing dictionaries :param obj key: A key to search in each dictionary :param value: A value or iterable of values to match :type value: obj or iter :returns: A list of dictionaries matching the query :rtype: list :Example: :: >>> dicts = [ ... {'hi': 'bye'}, ... {10: 2, 30: 4}, ... {'hi': 'hello', 'bye': 'goodbye'}, ... ] >>> select_dict(dicts, 'hi', 'bye') [{'hi': 'bye'}] >>> select_dict(dicts, 'hi', ('bye', 'hello')) [{'hi': 'bye'}, {'hi': 'hello', 'bye': 'goodbye'}]
[ "Given", "an", "iterable", "of", "dictionaries", "return", "the", "dictionaries", "where", "the", "values", "at", "a", "given", "key", "match", "the", "given", "value", ".", "If", "the", "value", "is", "an", "iterable", "of", "objects", "the", "function", ...
train
https://github.com/cloudboss/friend/blob/3357e6ec849552e3ae9ed28017ff0926e4006e4e/friend/collections.py#L22-L59
azraq27/neural
neural/qc.py
inside_brain
def inside_brain(stat_dset,atlas=None,p=0.001): '''calculates the percentage of voxels above a statistical threshold inside a brain mask vs. outside it if ``atlas`` is ``None``, it will try to find ``TT_N27``''' atlas = find_atlas(atlas) if atlas==None: return None mask_dset = nl.suffix(stat_dset,'_atlasfrac') nl.run(['3dfractionize','-template',nl.strip_subbrick(stat_dset),'-input',nl.calc([atlas],'1+step(a-100)',datum='short'),'-preserve','-clip','0.2','-prefix',mask_dset],products=mask_dset,quiet=True,stderr=None) s = nl.roi_stats(mask_dset,nl.thresh(stat_dset,p)) return 100.0 * s[2]['nzvoxels'] / (s[1]['nzvoxels'] + s[2]['nzvoxels'])
python
def inside_brain(stat_dset,atlas=None,p=0.001): '''calculates the percentage of voxels above a statistical threshold inside a brain mask vs. outside it if ``atlas`` is ``None``, it will try to find ``TT_N27``''' atlas = find_atlas(atlas) if atlas==None: return None mask_dset = nl.suffix(stat_dset,'_atlasfrac') nl.run(['3dfractionize','-template',nl.strip_subbrick(stat_dset),'-input',nl.calc([atlas],'1+step(a-100)',datum='short'),'-preserve','-clip','0.2','-prefix',mask_dset],products=mask_dset,quiet=True,stderr=None) s = nl.roi_stats(mask_dset,nl.thresh(stat_dset,p)) return 100.0 * s[2]['nzvoxels'] / (s[1]['nzvoxels'] + s[2]['nzvoxels'])
[ "def", "inside_brain", "(", "stat_dset", ",", "atlas", "=", "None", ",", "p", "=", "0.001", ")", ":", "atlas", "=", "find_atlas", "(", "atlas", ")", "if", "atlas", "==", "None", ":", "return", "None", "mask_dset", "=", "nl", ".", "suffix", "(", "stat...
calculates the percentage of voxels above a statistical threshold inside a brain mask vs. outside it if ``atlas`` is ``None``, it will try to find ``TT_N27``
[ "calculates", "the", "percentage", "of", "voxels", "above", "a", "statistical", "threshold", "inside", "a", "brain", "mask", "vs", ".", "outside", "it", "if", "atlas", "is", "None", "it", "will", "try", "to", "find", "TT_N27" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/qc.py#L17-L27
azraq27/neural
neural/qc.py
atlas_overlap
def atlas_overlap(dset,atlas=None): '''aligns ``dset`` to the TT_N27 atlas and returns ``(cost,overlap)``''' atlas = find_atlas(atlas) if atlas==None: return None cost_func = 'crM' infile = os.path.abspath(dset) tmpdir = tempfile.mkdtemp() with nl.run_in(tmpdir): o = nl.run(['3dAllineate','-verb','-base',atlas,'-source',infile + '[0]','-NN','-final','NN','-cost',cost_func,'-nmatch','20%','-onepass','-fineblur','2','-cmass','-prefix','test.nii.gz']) m = re.search(r'Final\s+cost = ([\d.]+) ;',o.output) if m: cost = float(m.group(1)) o = nl.run(['3dmaskave','-mask',atlas,'-q','test.nii.gz'],stderr=None) data_thresh = float(o.output) / 4 i = nl.dset_info('test.nii.gz') o = nl.run(['3dmaskave','-q','-mask','SELF','-sum',nl.calc([atlas,'test.nii.gz'],'equals(step(a-10),step(b-%.2f))'%data_thresh)],stderr=None) overlap = 100*float(o.output) / (i.voxel_dims[0]*i.voxel_dims[1]*i.voxel_dims[2]) try: shutil.rmtree(tmpdir) except: pass return (cost,overlap)
python
def atlas_overlap(dset,atlas=None): '''aligns ``dset`` to the TT_N27 atlas and returns ``(cost,overlap)``''' atlas = find_atlas(atlas) if atlas==None: return None cost_func = 'crM' infile = os.path.abspath(dset) tmpdir = tempfile.mkdtemp() with nl.run_in(tmpdir): o = nl.run(['3dAllineate','-verb','-base',atlas,'-source',infile + '[0]','-NN','-final','NN','-cost',cost_func,'-nmatch','20%','-onepass','-fineblur','2','-cmass','-prefix','test.nii.gz']) m = re.search(r'Final\s+cost = ([\d.]+) ;',o.output) if m: cost = float(m.group(1)) o = nl.run(['3dmaskave','-mask',atlas,'-q','test.nii.gz'],stderr=None) data_thresh = float(o.output) / 4 i = nl.dset_info('test.nii.gz') o = nl.run(['3dmaskave','-q','-mask','SELF','-sum',nl.calc([atlas,'test.nii.gz'],'equals(step(a-10),step(b-%.2f))'%data_thresh)],stderr=None) overlap = 100*float(o.output) / (i.voxel_dims[0]*i.voxel_dims[1]*i.voxel_dims[2]) try: shutil.rmtree(tmpdir) except: pass return (cost,overlap)
[ "def", "atlas_overlap", "(", "dset", ",", "atlas", "=", "None", ")", ":", "atlas", "=", "find_atlas", "(", "atlas", ")", "if", "atlas", "==", "None", ":", "return", "None", "cost_func", "=", "'crM'", "infile", "=", "os", ".", "path", ".", "abspath", ...
aligns ``dset`` to the TT_N27 atlas and returns ``(cost,overlap)``
[ "aligns", "dset", "to", "the", "TT_N27", "atlas", "and", "returns", "(", "cost", "overlap", ")" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/qc.py#L29-L52
azraq27/neural
neural/qc.py
outcount
def outcount(dset,fraction=0.1): '''gets outlier count and returns ``(list of proportion of outliers by timepoint,total percentage of outlier time points)''' polort = nl.auto_polort(dset) info = nl.dset_info(dset) o = nl.run(['3dToutcount','-fraction','-automask','-polort',polort,dset],stderr=None,quiet=None) if o.return_code==0 and o.output: oc = [float(x) for x in o.output.split('\n') if x.strip()!=''] binary_outcount = [x<fraction for x in oc] perc_outliers = 1 - (sum(binary_outcount)/float(info.reps)) return (oc,perc_outliers)
python
def outcount(dset,fraction=0.1): '''gets outlier count and returns ``(list of proportion of outliers by timepoint,total percentage of outlier time points)''' polort = nl.auto_polort(dset) info = nl.dset_info(dset) o = nl.run(['3dToutcount','-fraction','-automask','-polort',polort,dset],stderr=None,quiet=None) if o.return_code==0 and o.output: oc = [float(x) for x in o.output.split('\n') if x.strip()!=''] binary_outcount = [x<fraction for x in oc] perc_outliers = 1 - (sum(binary_outcount)/float(info.reps)) return (oc,perc_outliers)
[ "def", "outcount", "(", "dset", ",", "fraction", "=", "0.1", ")", ":", "polort", "=", "nl", ".", "auto_polort", "(", "dset", ")", "info", "=", "nl", ".", "dset_info", "(", "dset", ")", "o", "=", "nl", ".", "run", "(", "[", "'3dToutcount'", ",", "...
gets outlier count and returns ``(list of proportion of outliers by timepoint,total percentage of outlier time points)
[ "gets", "outlier", "count", "and", "returns", "(", "list", "of", "proportion", "of", "outliers", "by", "timepoint", "total", "percentage", "of", "outlier", "time", "points", ")" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/qc.py#L54-L63
azraq27/neural
neural/qc.py
temporal_snr
def temporal_snr(signal_dset,noise_dset,mask=None,prefix='temporal_snr.nii.gz'): '''Calculates temporal SNR by dividing average signal of ``signal_dset`` by SD of ``noise_dset``. ``signal_dset`` should be a dataset that contains the average signal value (i.e., nothing that has been detrended by removing the mean), and ``noise_dset`` should be a dataset that has all possible known signal fluctuations (e.g., task-related effects) removed from it (the residual dataset from a deconvolve works well)''' for d in [('mean',signal_dset), ('stdev',noise_dset)]: new_d = nl.suffix(d[1],'_%s' % d[0]) cmd = ['3dTstat','-%s' % d[0],'-prefix',new_d] if mask: cmd += ['-mask',mask] cmd += [d[1]] nl.run(cmd,products=new_d) nl.calc([nl.suffix(signal_dset,'_mean'),nl.suffix(noise_dset,'_stdev')],'a/b',prefix=prefix)
python
def temporal_snr(signal_dset,noise_dset,mask=None,prefix='temporal_snr.nii.gz'): '''Calculates temporal SNR by dividing average signal of ``signal_dset`` by SD of ``noise_dset``. ``signal_dset`` should be a dataset that contains the average signal value (i.e., nothing that has been detrended by removing the mean), and ``noise_dset`` should be a dataset that has all possible known signal fluctuations (e.g., task-related effects) removed from it (the residual dataset from a deconvolve works well)''' for d in [('mean',signal_dset), ('stdev',noise_dset)]: new_d = nl.suffix(d[1],'_%s' % d[0]) cmd = ['3dTstat','-%s' % d[0],'-prefix',new_d] if mask: cmd += ['-mask',mask] cmd += [d[1]] nl.run(cmd,products=new_d) nl.calc([nl.suffix(signal_dset,'_mean'),nl.suffix(noise_dset,'_stdev')],'a/b',prefix=prefix)
[ "def", "temporal_snr", "(", "signal_dset", ",", "noise_dset", ",", "mask", "=", "None", ",", "prefix", "=", "'temporal_snr.nii.gz'", ")", ":", "for", "d", "in", "[", "(", "'mean'", ",", "signal_dset", ")", ",", "(", "'stdev'", ",", "noise_dset", ")", "]"...
Calculates temporal SNR by dividing average signal of ``signal_dset`` by SD of ``noise_dset``. ``signal_dset`` should be a dataset that contains the average signal value (i.e., nothing that has been detrended by removing the mean), and ``noise_dset`` should be a dataset that has all possible known signal fluctuations (e.g., task-related effects) removed from it (the residual dataset from a deconvolve works well)
[ "Calculates", "temporal", "SNR", "by", "dividing", "average", "signal", "of", "signal_dset", "by", "SD", "of", "noise_dset", ".", "signal_dset", "should", "be", "a", "dataset", "that", "contains", "the", "average", "signal", "value", "(", "i", ".", "e", ".",...
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/qc.py#L65-L78
azraq27/neural
neural/qc.py
auto_qc
def auto_qc(dset,inside_perc=60,atlas=None,p=0.001): '''returns ``False`` if ``dset`` fails minimum checks, or returns a float from ``0.0`` to ``100.0`` describing data quality''' with nl.notify('Running quality check on %s:' % dset): if not os.path.exists(dset): nl.notify('Error: cannot find the file!',level=nl.level.error) return False info = nl.dset_info(dset) if not info: nl.notify('Error: could not read the dataset!',level=nl.level.error) if any(['stat' in x for x in info.subbricks]): with nl.notify('Statistical results detected...'): inside = inside_brain(dset,atlas=atlas,p=p) nl.notify('%.1f significant voxels inside brain') if inside<inside_perc: nl.notify('Warning: below quality threshold!',level=nl.level.warning) # return False nl.notify('Looks ok') return inside if len(info.subbricks)>1: with nl.notify('Time-series detected...'): return_val = True (cost,overlap) = atlas_overlap(dset) if cost>0.15 or overlap<80: nl.notify('Warning: does not appear to conform to brain dimensions',level=nl.level.warning) return_val = False if len(info.subbricks)>5: (oc,perc_outliers) = outcount(dset) if perc_outliers>0.1: nl.notify('Warning: large amount of outlier time points',level=nl.level.warning) return_val = False if return_val: nl.notify('Looks ok') return min(100*(1-cost),overlap,100*perc_outliers) return False with nl.notify('Single brain image detected...'): (cost,overlap) = atlas_overlap(dset) # Be more lenient if it's not an EPI, cuz who knows what else is in this image if cost>0.45 or overlap<70: nl.notify('Warning: does not appear to conform to brain dimensions',level=nl.level.warning) return False nl.notify('Looks ok') return min(100*(1-cost),overlap)
python
def auto_qc(dset,inside_perc=60,atlas=None,p=0.001): '''returns ``False`` if ``dset`` fails minimum checks, or returns a float from ``0.0`` to ``100.0`` describing data quality''' with nl.notify('Running quality check on %s:' % dset): if not os.path.exists(dset): nl.notify('Error: cannot find the file!',level=nl.level.error) return False info = nl.dset_info(dset) if not info: nl.notify('Error: could not read the dataset!',level=nl.level.error) if any(['stat' in x for x in info.subbricks]): with nl.notify('Statistical results detected...'): inside = inside_brain(dset,atlas=atlas,p=p) nl.notify('%.1f significant voxels inside brain') if inside<inside_perc: nl.notify('Warning: below quality threshold!',level=nl.level.warning) # return False nl.notify('Looks ok') return inside if len(info.subbricks)>1: with nl.notify('Time-series detected...'): return_val = True (cost,overlap) = atlas_overlap(dset) if cost>0.15 or overlap<80: nl.notify('Warning: does not appear to conform to brain dimensions',level=nl.level.warning) return_val = False if len(info.subbricks)>5: (oc,perc_outliers) = outcount(dset) if perc_outliers>0.1: nl.notify('Warning: large amount of outlier time points',level=nl.level.warning) return_val = False if return_val: nl.notify('Looks ok') return min(100*(1-cost),overlap,100*perc_outliers) return False with nl.notify('Single brain image detected...'): (cost,overlap) = atlas_overlap(dset) # Be more lenient if it's not an EPI, cuz who knows what else is in this image if cost>0.45 or overlap<70: nl.notify('Warning: does not appear to conform to brain dimensions',level=nl.level.warning) return False nl.notify('Looks ok') return min(100*(1-cost),overlap)
[ "def", "auto_qc", "(", "dset", ",", "inside_perc", "=", "60", ",", "atlas", "=", "None", ",", "p", "=", "0.001", ")", ":", "with", "nl", ".", "notify", "(", "'Running quality check on %s:'", "%", "dset", ")", ":", "if", "not", "os", ".", "path", ".",...
returns ``False`` if ``dset`` fails minimum checks, or returns a float from ``0.0`` to ``100.0`` describing data quality
[ "returns", "False", "if", "dset", "fails", "minimum", "checks", "or", "returns", "a", "float", "from", "0", ".", "0", "to", "100", ".", "0", "describing", "data", "quality" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/qc.py#L80-L125
pyschool/story
story/utils.py
lazy
def lazy(func, *resultclasses): """ Turn any callable into a lazy evaluated callable. result classes or types is required -- at least one is needed so that the automatic forcing of the lazy evaluation code is triggered. Results are not memoized; the function is evaluated on every access. """ @total_ordering class __proxy__(Promise): """ Encapsulate a function call and act as a proxy for methods that are called on the result of that function. The function is not evaluated until one of the methods on the result is called. """ __prepared = False def __init__(self, args, kw): self.__args = args self.__kw = kw if not self.__prepared: self.__prepare_class__() self.__prepared = True def __reduce__(self): return ( _lazy_proxy_unpickle, (func, self.__args, self.__kw) + resultclasses ) def __repr__(self): return repr(self.__cast()) @classmethod def __prepare_class__(cls): for resultclass in resultclasses: for type_ in resultclass.mro(): for method_name in type_.__dict__.keys(): # All __promise__ return the same wrapper method, they # look up the correct implementation when called. if hasattr(cls, method_name): continue meth = cls.__promise__(method_name) setattr(cls, method_name, meth) cls._delegate_bytes = bytes in resultclasses cls._delegate_text = str in resultclasses assert not (cls._delegate_bytes and cls._delegate_text), ( "Cannot call lazy() with both bytes and text return types.") if cls._delegate_text: cls.__str__ = cls.__text_cast elif cls._delegate_bytes: cls.__bytes__ = cls.__bytes_cast @classmethod def __promise__(cls, method_name): # Builds a wrapper around some magic method def __wrapper__(self, *args, **kw): # Automatically triggers the evaluation of a lazy value and # applies the given magic method of the result type. 
res = func(*self.__args, **self.__kw) return getattr(res, method_name)(*args, **kw) return __wrapper__ def __text_cast(self): return func(*self.__args, **self.__kw) def __bytes_cast(self): return bytes(func(*self.__args, **self.__kw)) def __bytes_cast_encoded(self): return func(*self.__args, **self.__kw).encode() def __cast(self): if self._delegate_bytes: return self.__bytes_cast() elif self._delegate_text: return self.__text_cast() else: return func(*self.__args, **self.__kw) def __str__(self): # object defines __str__(), so __prepare_class__() won't overload # a __str__() method from the proxied class. return str(self.__cast()) def __eq__(self, other): if isinstance(other, Promise): other = other.__cast() return self.__cast() == other def __lt__(self, other): if isinstance(other, Promise): other = other.__cast() return self.__cast() < other def __hash__(self): return hash(self.__cast()) def __mod__(self, rhs): if self._delegate_text: return str(self) % rhs return self.__cast() % rhs def __deepcopy__(self, memo): # Instances of this class are effectively immutable. It's just a # collection of functions. So we don't need to do anything # complicated for copying. memo[id(self)] = self return self @wraps(func) def __wrapper__(*args, **kw): # Creates the proxy object, instead of the actual value. return __proxy__(args, kw) return __wrapper__
python
def lazy(func, *resultclasses): """ Turn any callable into a lazy evaluated callable. result classes or types is required -- at least one is needed so that the automatic forcing of the lazy evaluation code is triggered. Results are not memoized; the function is evaluated on every access. """ @total_ordering class __proxy__(Promise): """ Encapsulate a function call and act as a proxy for methods that are called on the result of that function. The function is not evaluated until one of the methods on the result is called. """ __prepared = False def __init__(self, args, kw): self.__args = args self.__kw = kw if not self.__prepared: self.__prepare_class__() self.__prepared = True def __reduce__(self): return ( _lazy_proxy_unpickle, (func, self.__args, self.__kw) + resultclasses ) def __repr__(self): return repr(self.__cast()) @classmethod def __prepare_class__(cls): for resultclass in resultclasses: for type_ in resultclass.mro(): for method_name in type_.__dict__.keys(): # All __promise__ return the same wrapper method, they # look up the correct implementation when called. if hasattr(cls, method_name): continue meth = cls.__promise__(method_name) setattr(cls, method_name, meth) cls._delegate_bytes = bytes in resultclasses cls._delegate_text = str in resultclasses assert not (cls._delegate_bytes and cls._delegate_text), ( "Cannot call lazy() with both bytes and text return types.") if cls._delegate_text: cls.__str__ = cls.__text_cast elif cls._delegate_bytes: cls.__bytes__ = cls.__bytes_cast @classmethod def __promise__(cls, method_name): # Builds a wrapper around some magic method def __wrapper__(self, *args, **kw): # Automatically triggers the evaluation of a lazy value and # applies the given magic method of the result type. 
res = func(*self.__args, **self.__kw) return getattr(res, method_name)(*args, **kw) return __wrapper__ def __text_cast(self): return func(*self.__args, **self.__kw) def __bytes_cast(self): return bytes(func(*self.__args, **self.__kw)) def __bytes_cast_encoded(self): return func(*self.__args, **self.__kw).encode() def __cast(self): if self._delegate_bytes: return self.__bytes_cast() elif self._delegate_text: return self.__text_cast() else: return func(*self.__args, **self.__kw) def __str__(self): # object defines __str__(), so __prepare_class__() won't overload # a __str__() method from the proxied class. return str(self.__cast()) def __eq__(self, other): if isinstance(other, Promise): other = other.__cast() return self.__cast() == other def __lt__(self, other): if isinstance(other, Promise): other = other.__cast() return self.__cast() < other def __hash__(self): return hash(self.__cast()) def __mod__(self, rhs): if self._delegate_text: return str(self) % rhs return self.__cast() % rhs def __deepcopy__(self, memo): # Instances of this class are effectively immutable. It's just a # collection of functions. So we don't need to do anything # complicated for copying. memo[id(self)] = self return self @wraps(func) def __wrapper__(*args, **kw): # Creates the proxy object, instead of the actual value. return __proxy__(args, kw) return __wrapper__
[ "def", "lazy", "(", "func", ",", "*", "resultclasses", ")", ":", "@", "total_ordering", "class", "__proxy__", "(", "Promise", ")", ":", "\"\"\"\n Encapsulate a function call and act as a proxy for methods that are\n called on the result of that function. The function ...
Turn any callable into a lazy evaluated callable. result classes or types is required -- at least one is needed so that the automatic forcing of the lazy evaluation code is triggered. Results are not memoized; the function is evaluated on every access.
[ "Turn", "any", "callable", "into", "a", "lazy", "evaluated", "callable", ".", "result", "classes", "or", "types", "is", "required", "--", "at", "least", "one", "is", "needed", "so", "that", "the", "automatic", "forcing", "of", "the", "lazy", "evaluation", ...
train
https://github.com/pyschool/story/blob/c23daf4a187b0df4cbae88ef06b36c396f1ffd57/story/utils.py#L12-L127
stoneworksolutions/stoneredis
stoneredis/client.py
StoneRedis.reconnect
def reconnect(self, conn_retries=None): ''' Connects to Redis with a exponential waiting (3**n) ''' if conn_retries is None: conn_retries = self.conn_retries count = 0 if self.logger: self.logger.info('Connecting to Redis..') while count < conn_retries: super(redis.client.Redis, self).__init__(*self.args, **self.kwargs) if self.ping(): if self.logger: self.logger.info('Connected to Redis!') return True else: sl = min(3 ** count, self.max_sleep) if self.logger: self.logger.info('Connecting failed, retrying in {0} seconds'.format(sl)) time.sleep(sl) count += 1 raise ConnectionError
python
def reconnect(self, conn_retries=None): ''' Connects to Redis with a exponential waiting (3**n) ''' if conn_retries is None: conn_retries = self.conn_retries count = 0 if self.logger: self.logger.info('Connecting to Redis..') while count < conn_retries: super(redis.client.Redis, self).__init__(*self.args, **self.kwargs) if self.ping(): if self.logger: self.logger.info('Connected to Redis!') return True else: sl = min(3 ** count, self.max_sleep) if self.logger: self.logger.info('Connecting failed, retrying in {0} seconds'.format(sl)) time.sleep(sl) count += 1 raise ConnectionError
[ "def", "reconnect", "(", "self", ",", "conn_retries", "=", "None", ")", ":", "if", "conn_retries", "is", "None", ":", "conn_retries", "=", "self", ".", "conn_retries", "count", "=", "0", "if", "self", ".", "logger", ":", "self", ".", "logger", ".", "in...
Connects to Redis with a exponential waiting (3**n)
[ "Connects", "to", "Redis", "with", "a", "exponential", "waiting", "(", "3", "**", "n", ")" ]
train
https://github.com/stoneworksolutions/stoneredis/blob/46018b81317c152bd47c0764c3f1293379618af4/stoneredis/client.py#L59-L80
stoneworksolutions/stoneredis
stoneredis/client.py
StoneRedis._multi_lpop_pipeline
def _multi_lpop_pipeline(self, pipe, queue, number): ''' Pops multiple elements from a list in a given pipeline''' pipe.lrange(queue, 0, number - 1) pipe.ltrim(queue, number, -1)
python
def _multi_lpop_pipeline(self, pipe, queue, number): ''' Pops multiple elements from a list in a given pipeline''' pipe.lrange(queue, 0, number - 1) pipe.ltrim(queue, number, -1)
[ "def", "_multi_lpop_pipeline", "(", "self", ",", "pipe", ",", "queue", ",", "number", ")", ":", "pipe", ".", "lrange", "(", "queue", ",", "0", ",", "number", "-", "1", ")", "pipe", ".", "ltrim", "(", "queue", ",", "number", ",", "-", "1", ")" ]
Pops multiple elements from a list in a given pipeline
[ "Pops", "multiple", "elements", "from", "a", "list", "in", "a", "given", "pipeline" ]
train
https://github.com/stoneworksolutions/stoneredis/blob/46018b81317c152bd47c0764c3f1293379618af4/stoneredis/client.py#L101-L104
stoneworksolutions/stoneredis
stoneredis/client.py
StoneRedis.multi_lpop
def multi_lpop(self, queue, number, transaction=False): ''' Pops multiple elements from a list This operation will be atomic if transaction=True is passed ''' try: pipe = self.pipeline(transaction=transaction) pipe.multi() self._multi_lpop_pipeline(pipe, queue, number) return pipe.execute()[0] except IndexError: return [] except: raise
python
def multi_lpop(self, queue, number, transaction=False): ''' Pops multiple elements from a list This operation will be atomic if transaction=True is passed ''' try: pipe = self.pipeline(transaction=transaction) pipe.multi() self._multi_lpop_pipeline(pipe, queue, number) return pipe.execute()[0] except IndexError: return [] except: raise
[ "def", "multi_lpop", "(", "self", ",", "queue", ",", "number", ",", "transaction", "=", "False", ")", ":", "try", ":", "pipe", "=", "self", ".", "pipeline", "(", "transaction", "=", "transaction", ")", "pipe", ".", "multi", "(", ")", "self", ".", "_m...
Pops multiple elements from a list This operation will be atomic if transaction=True is passed
[ "Pops", "multiple", "elements", "from", "a", "list", "This", "operation", "will", "be", "atomic", "if", "transaction", "=", "True", "is", "passed" ]
train
https://github.com/stoneworksolutions/stoneredis/blob/46018b81317c152bd47c0764c3f1293379618af4/stoneredis/client.py#L106-L118
stoneworksolutions/stoneredis
stoneredis/client.py
StoneRedis._multi_rpush_pipeline
def _multi_rpush_pipeline(self, pipe, queue, values, bulk_size=0): ''' Pushes multiple elements to a list in a given pipeline If bulk_size is set it will execute the pipeline every bulk_size elements ''' cont = 0 for value in values: pipe.rpush(queue, value) if bulk_size != 0 and cont % bulk_size == 0: pipe.execute()
python
def _multi_rpush_pipeline(self, pipe, queue, values, bulk_size=0): ''' Pushes multiple elements to a list in a given pipeline If bulk_size is set it will execute the pipeline every bulk_size elements ''' cont = 0 for value in values: pipe.rpush(queue, value) if bulk_size != 0 and cont % bulk_size == 0: pipe.execute()
[ "def", "_multi_rpush_pipeline", "(", "self", ",", "pipe", ",", "queue", ",", "values", ",", "bulk_size", "=", "0", ")", ":", "cont", "=", "0", "for", "value", "in", "values", ":", "pipe", ".", "rpush", "(", "queue", ",", "value", ")", "if", "bulk_siz...
Pushes multiple elements to a list in a given pipeline If bulk_size is set it will execute the pipeline every bulk_size elements
[ "Pushes", "multiple", "elements", "to", "a", "list", "in", "a", "given", "pipeline", "If", "bulk_size", "is", "set", "it", "will", "execute", "the", "pipeline", "every", "bulk_size", "elements" ]
train
https://github.com/stoneworksolutions/stoneredis/blob/46018b81317c152bd47c0764c3f1293379618af4/stoneredis/client.py#L120-L128
stoneworksolutions/stoneredis
stoneredis/client.py
StoneRedis.multi_rpush
def multi_rpush(self, queue, values, bulk_size=0, transaction=False): ''' Pushes multiple elements to a list If bulk_size is set it will execute the pipeline every bulk_size elements This operation will be atomic if transaction=True is passed ''' # Check that what we receive is iterable if hasattr(values, '__iter__'): pipe = self.pipeline(transaction=transaction) pipe.multi() self._multi_rpush_pipeline(pipe, queue, values, bulk_size) pipe.execute() else: raise ValueError('Expected an iterable')
python
def multi_rpush(self, queue, values, bulk_size=0, transaction=False): ''' Pushes multiple elements to a list If bulk_size is set it will execute the pipeline every bulk_size elements This operation will be atomic if transaction=True is passed ''' # Check that what we receive is iterable if hasattr(values, '__iter__'): pipe = self.pipeline(transaction=transaction) pipe.multi() self._multi_rpush_pipeline(pipe, queue, values, bulk_size) pipe.execute() else: raise ValueError('Expected an iterable')
[ "def", "multi_rpush", "(", "self", ",", "queue", ",", "values", ",", "bulk_size", "=", "0", ",", "transaction", "=", "False", ")", ":", "# Check that what we receive is iterable\r", "if", "hasattr", "(", "values", ",", "'__iter__'", ")", ":", "pipe", "=", "s...
Pushes multiple elements to a list If bulk_size is set it will execute the pipeline every bulk_size elements This operation will be atomic if transaction=True is passed
[ "Pushes", "multiple", "elements", "to", "a", "list", "If", "bulk_size", "is", "set", "it", "will", "execute", "the", "pipeline", "every", "bulk_size", "elements", "This", "operation", "will", "be", "atomic", "if", "transaction", "=", "True", "is", "passed" ]
train
https://github.com/stoneworksolutions/stoneredis/blob/46018b81317c152bd47c0764c3f1293379618af4/stoneredis/client.py#L130-L142
stoneworksolutions/stoneredis
stoneredis/client.py
StoneRedis.multi_rpush_limit
def multi_rpush_limit(self, queue, values, limit=100000): ''' Pushes multiple elements to a list in an atomic way until it reaches certain size Once limit is reached, the function will lpop the oldest elements This operation runs in LUA, so is always atomic ''' lua = ''' local queue = KEYS[1] local max_size = tonumber(KEYS[2]) local table_len = tonumber(table.getn(ARGV)) local redis_queue_len = tonumber(redis.call('LLEN', queue)) local total_size = redis_queue_len + table_len local from = 0 if total_size >= max_size then -- Delete the same amount of data we are inserting. Even better, limit the queue to the specified size redis.call('PUBLISH', 'DEBUG', 'trim') if redis_queue_len - max_size + table_len > 0 then from = redis_queue_len - max_size + table_len else from = 0 end redis.call('LTRIM', queue, from, redis_queue_len) end for _,key in ipairs(ARGV) do redis.call('RPUSH', queue, key) end return 1 ''' # Check that what we receive is iterable if hasattr(values, '__iter__'): if len(values) > limit: raise ValueError('The iterable size is bigger than the allowed limit ({1}): {0}'.format(len(values), limit)) try: self.multi_rpush_limit_script([queue, limit], values) except AttributeError: if self.logger: self.logger.info('Script not registered... registering') # If the script is not registered, register it self.multi_rpush_limit_script = self.register_script(lua) self.multi_rpush_limit_script([queue, limit], values) else: raise ValueError('Expected an iterable')
python
def multi_rpush_limit(self, queue, values, limit=100000): ''' Pushes multiple elements to a list in an atomic way until it reaches certain size Once limit is reached, the function will lpop the oldest elements This operation runs in LUA, so is always atomic ''' lua = ''' local queue = KEYS[1] local max_size = tonumber(KEYS[2]) local table_len = tonumber(table.getn(ARGV)) local redis_queue_len = tonumber(redis.call('LLEN', queue)) local total_size = redis_queue_len + table_len local from = 0 if total_size >= max_size then -- Delete the same amount of data we are inserting. Even better, limit the queue to the specified size redis.call('PUBLISH', 'DEBUG', 'trim') if redis_queue_len - max_size + table_len > 0 then from = redis_queue_len - max_size + table_len else from = 0 end redis.call('LTRIM', queue, from, redis_queue_len) end for _,key in ipairs(ARGV) do redis.call('RPUSH', queue, key) end return 1 ''' # Check that what we receive is iterable if hasattr(values, '__iter__'): if len(values) > limit: raise ValueError('The iterable size is bigger than the allowed limit ({1}): {0}'.format(len(values), limit)) try: self.multi_rpush_limit_script([queue, limit], values) except AttributeError: if self.logger: self.logger.info('Script not registered... registering') # If the script is not registered, register it self.multi_rpush_limit_script = self.register_script(lua) self.multi_rpush_limit_script([queue, limit], values) else: raise ValueError('Expected an iterable')
[ "def", "multi_rpush_limit", "(", "self", ",", "queue", ",", "values", ",", "limit", "=", "100000", ")", ":", "lua", "=", "'''\r\n local queue = KEYS[1]\r\n local max_size = tonumber(KEYS[2])\r\n local table_len = tonumber(table.getn(ARGV))\r\n local redis_...
Pushes multiple elements to a list in an atomic way until it reaches certain size Once limit is reached, the function will lpop the oldest elements This operation runs in LUA, so is always atomic
[ "Pushes", "multiple", "elements", "to", "a", "list", "in", "an", "atomic", "way", "until", "it", "reaches", "certain", "size", "Once", "limit", "is", "reached", "the", "function", "will", "lpop", "the", "oldest", "elements", "This", "operation", "runs", "in"...
train
https://github.com/stoneworksolutions/stoneredis/blob/46018b81317c152bd47c0764c3f1293379618af4/stoneredis/client.py#L144-L188
stoneworksolutions/stoneredis
stoneredis/client.py
StoneRedis.rpush_limit
def rpush_limit(self, queue, value, limit=100000): ''' Pushes an element to a list in an atomic way until it reaches certain size Once limit is reached, the function will lpop the oldest elements This operation runs in LUA, so is always atomic ''' lua = ''' local queue = KEYS[1] local max_size = tonumber(KEYS[2]) local table_len = 1 local redis_queue_len = tonumber(redis.call('LLEN', queue)) local total_size = redis_queue_len + table_len local from = 0 if total_size >= max_size then -- Delete the same amount of data we are inserting. Even better, limit the queue to the specified size redis.call('PUBLISH', 'DEBUG', 'trim') if redis_queue_len - max_size + table_len > 0 then from = redis_queue_len - max_size + table_len else from = 0 end redis.call('LTRIM', queue, from, redis_queue_len) end redis.call('RPUSH', queue, ARGV[1]) return 1 ''' try: self.rpush_limit_script([queue, limit], [value]) except AttributeError: if self.logger: self.logger.info('Script not registered... registering') # If the script is not registered, register it self.rpush_limit_script = self.register_script(lua) self.rpush_limit_script([queue, limit], [value])
python
def rpush_limit(self, queue, value, limit=100000): ''' Pushes an element to a list in an atomic way until it reaches certain size Once limit is reached, the function will lpop the oldest elements This operation runs in LUA, so is always atomic ''' lua = ''' local queue = KEYS[1] local max_size = tonumber(KEYS[2]) local table_len = 1 local redis_queue_len = tonumber(redis.call('LLEN', queue)) local total_size = redis_queue_len + table_len local from = 0 if total_size >= max_size then -- Delete the same amount of data we are inserting. Even better, limit the queue to the specified size redis.call('PUBLISH', 'DEBUG', 'trim') if redis_queue_len - max_size + table_len > 0 then from = redis_queue_len - max_size + table_len else from = 0 end redis.call('LTRIM', queue, from, redis_queue_len) end redis.call('RPUSH', queue, ARGV[1]) return 1 ''' try: self.rpush_limit_script([queue, limit], [value]) except AttributeError: if self.logger: self.logger.info('Script not registered... registering') # If the script is not registered, register it self.rpush_limit_script = self.register_script(lua) self.rpush_limit_script([queue, limit], [value])
[ "def", "rpush_limit", "(", "self", ",", "queue", ",", "value", ",", "limit", "=", "100000", ")", ":", "lua", "=", "'''\r\n local queue = KEYS[1]\r\n local max_size = tonumber(KEYS[2])\r\n local table_len = 1\r\n local redis_queue_len = tonumber(redis.call(...
Pushes an element to a list in an atomic way until it reaches certain size Once limit is reached, the function will lpop the oldest elements This operation runs in LUA, so is always atomic
[ "Pushes", "an", "element", "to", "a", "list", "in", "an", "atomic", "way", "until", "it", "reaches", "certain", "size", "Once", "limit", "is", "reached", "the", "function", "will", "lpop", "the", "oldest", "elements", "This", "operation", "runs", "in", "LU...
train
https://github.com/stoneworksolutions/stoneredis/blob/46018b81317c152bd47c0764c3f1293379618af4/stoneredis/client.py#L190-L226
stoneworksolutions/stoneredis
stoneredis/client.py
StoneRedis.get_lock
def get_lock(self, lockname, locktime=60, auto_renewal=False): ''' Gets a lock and returns if it can be stablished. Returns false otherwise ''' pid = os.getpid() caller = inspect.stack()[0][3] try: # rl = redlock.Redlock([{"host": settings.REDIS_SERVERS['std_redis']['host'], "port": settings.REDIS_SERVERS['std_redis']['port'], "db": settings.REDIS_SERVERS['std_redis']['db']}, ]) rl = redis_lock.Lock(self, lockname, expire=locktime, auto_renewal=auto_renewal) except: if self.logger: self.logger.error('Process {0} ({1}) could not get lock {2}. Going ahead without locking!!! {3}'.format(pid, caller, lockname, traceback.format_exc())) return False try: lock = rl.acquire(blocking=False) except RedisError: return False if not lock: return False else: return rl
python
def get_lock(self, lockname, locktime=60, auto_renewal=False): ''' Gets a lock and returns if it can be stablished. Returns false otherwise ''' pid = os.getpid() caller = inspect.stack()[0][3] try: # rl = redlock.Redlock([{"host": settings.REDIS_SERVERS['std_redis']['host'], "port": settings.REDIS_SERVERS['std_redis']['port'], "db": settings.REDIS_SERVERS['std_redis']['db']}, ]) rl = redis_lock.Lock(self, lockname, expire=locktime, auto_renewal=auto_renewal) except: if self.logger: self.logger.error('Process {0} ({1}) could not get lock {2}. Going ahead without locking!!! {3}'.format(pid, caller, lockname, traceback.format_exc())) return False try: lock = rl.acquire(blocking=False) except RedisError: return False if not lock: return False else: return rl
[ "def", "get_lock", "(", "self", ",", "lockname", ",", "locktime", "=", "60", ",", "auto_renewal", "=", "False", ")", ":", "pid", "=", "os", ".", "getpid", "(", ")", "caller", "=", "inspect", ".", "stack", "(", ")", "[", "0", "]", "[", "3", "]", ...
Gets a lock and returns if it can be stablished. Returns false otherwise
[ "Gets", "a", "lock", "and", "returns", "if", "it", "can", "be", "stablished", ".", "Returns", "false", "otherwise" ]
train
https://github.com/stoneworksolutions/stoneredis/blob/46018b81317c152bd47c0764c3f1293379618af4/stoneredis/client.py#L228-L246
stoneworksolutions/stoneredis
stoneredis/client.py
StoneRedis.wait_for_lock
def wait_for_lock(self, lockname, locktime=60, auto_renewal=False): ''' Gets a lock or waits until it is able to get it ''' pid = os.getpid() caller = inspect.stack()[0][3] try: # rl = redlock.Redlock([{"host": settings.REDIS_SERVERS['std_redis']['host'], "port": settings.REDIS_SERVERS['std_redis']['port'], "db": settings.REDIS_SERVERS['std_redis']['db']}, ]) rl = redis_lock.Lock(self, lockname, expire=locktime, auto_renewal=auto_renewal) except AssertionError: if self.logger: self.logger.error('Process {0} ({1}) could not get lock {2}. Going ahead without locking!!! {3}'.format(pid, caller, lockname, traceback.format_exc())) return False cont = 1 t0 = time.time() lock = None while not lock: time.sleep(.05) cont += 1 if cont % 20 == 0: if self.logger: self.logger.debug('Process {0} ({1}) waiting for lock {2}. {3} seconds elapsed.'.format(pid, caller, lockname, time.time() - t0)) # lock = rl.lock(lockname, locktime_ms) try: lock = rl.acquire() except RedisError: pass if self.logger: self.logger.debug('Process {0} ({1}) got lock {2} for {3} seconds'.format(pid, caller, lockname, locktime)) return rl
python
def wait_for_lock(self, lockname, locktime=60, auto_renewal=False): ''' Gets a lock or waits until it is able to get it ''' pid = os.getpid() caller = inspect.stack()[0][3] try: # rl = redlock.Redlock([{"host": settings.REDIS_SERVERS['std_redis']['host'], "port": settings.REDIS_SERVERS['std_redis']['port'], "db": settings.REDIS_SERVERS['std_redis']['db']}, ]) rl = redis_lock.Lock(self, lockname, expire=locktime, auto_renewal=auto_renewal) except AssertionError: if self.logger: self.logger.error('Process {0} ({1}) could not get lock {2}. Going ahead without locking!!! {3}'.format(pid, caller, lockname, traceback.format_exc())) return False cont = 1 t0 = time.time() lock = None while not lock: time.sleep(.05) cont += 1 if cont % 20 == 0: if self.logger: self.logger.debug('Process {0} ({1}) waiting for lock {2}. {3} seconds elapsed.'.format(pid, caller, lockname, time.time() - t0)) # lock = rl.lock(lockname, locktime_ms) try: lock = rl.acquire() except RedisError: pass if self.logger: self.logger.debug('Process {0} ({1}) got lock {2} for {3} seconds'.format(pid, caller, lockname, locktime)) return rl
[ "def", "wait_for_lock", "(", "self", ",", "lockname", ",", "locktime", "=", "60", ",", "auto_renewal", "=", "False", ")", ":", "pid", "=", "os", ".", "getpid", "(", ")", "caller", "=", "inspect", ".", "stack", "(", ")", "[", "0", "]", "[", "3", "...
Gets a lock or waits until it is able to get it
[ "Gets", "a", "lock", "or", "waits", "until", "it", "is", "able", "to", "get", "it" ]
train
https://github.com/stoneworksolutions/stoneredis/blob/46018b81317c152bd47c0764c3f1293379618af4/stoneredis/client.py#L248-L275
stoneworksolutions/stoneredis
stoneredis/client.py
StoneRedis.release_lock
def release_lock(self, lock, force=False): ''' Frees a lock ''' pid = os.getpid() caller = inspect.stack()[0][3] # try: # rl = redlock.Redlock([{"host": settings.REDIS_SERVERS['std_redis']['host'], "port": settings.REDIS_SERVERS['std_redis']['port'], "db": settings.REDIS_SERVERS['std_redis']['db']}, ]) # except: # logger.error('Process {0} ({1}) could not release lock {2}'.format(pid, caller, lock.resource)) # return False if lock and lock._held: lock.release() if self.logger: self.logger.debug('Process {0} ({1}) released lock'.format(pid, caller))
python
def release_lock(self, lock, force=False): ''' Frees a lock ''' pid = os.getpid() caller = inspect.stack()[0][3] # try: # rl = redlock.Redlock([{"host": settings.REDIS_SERVERS['std_redis']['host'], "port": settings.REDIS_SERVERS['std_redis']['port'], "db": settings.REDIS_SERVERS['std_redis']['db']}, ]) # except: # logger.error('Process {0} ({1}) could not release lock {2}'.format(pid, caller, lock.resource)) # return False if lock and lock._held: lock.release() if self.logger: self.logger.debug('Process {0} ({1}) released lock'.format(pid, caller))
[ "def", "release_lock", "(", "self", ",", "lock", ",", "force", "=", "False", ")", ":", "pid", "=", "os", ".", "getpid", "(", ")", "caller", "=", "inspect", ".", "stack", "(", ")", "[", "0", "]", "[", "3", "]", "# try:\r", "# rl = redlock.Redlock([{...
Frees a lock
[ "Frees", "a", "lock" ]
train
https://github.com/stoneworksolutions/stoneredis/blob/46018b81317c152bd47c0764c3f1293379618af4/stoneredis/client.py#L277-L289
stoneworksolutions/stoneredis
stoneredis/client.py
StoneRedis.pipeline
def pipeline(self, transaction=True, shard_hint=None): ''' Return a pipeline that support StoneRedis custom methods ''' args_dict = { 'connection_pool': self.connection_pool, 'response_callbacks': self.response_callbacks, 'transaction': transaction, 'shard_hint': shard_hint, 'logger': self.logger, } return StonePipeline(**args_dict)
python
def pipeline(self, transaction=True, shard_hint=None): ''' Return a pipeline that support StoneRedis custom methods ''' args_dict = { 'connection_pool': self.connection_pool, 'response_callbacks': self.response_callbacks, 'transaction': transaction, 'shard_hint': shard_hint, 'logger': self.logger, } return StonePipeline(**args_dict)
[ "def", "pipeline", "(", "self", ",", "transaction", "=", "True", ",", "shard_hint", "=", "None", ")", ":", "args_dict", "=", "{", "'connection_pool'", ":", "self", ".", "connection_pool", ",", "'response_callbacks'", ":", "self", ".", "response_callbacks", ","...
Return a pipeline that support StoneRedis custom methods
[ "Return", "a", "pipeline", "that", "support", "StoneRedis", "custom", "methods" ]
train
https://github.com/stoneworksolutions/stoneredis/blob/46018b81317c152bd47c0764c3f1293379618af4/stoneredis/client.py#L291-L301
stoneworksolutions/stoneredis
stoneredis/client.py
StonePipeline.multi_lpop
def multi_lpop(self, queue, number, transaction=False): ''' Pops multiple elements from a list ''' try: self._multi_lpop_pipeline(self, queue, number) except: raise
python
def multi_lpop(self, queue, number, transaction=False): ''' Pops multiple elements from a list ''' try: self._multi_lpop_pipeline(self, queue, number) except: raise
[ "def", "multi_lpop", "(", "self", ",", "queue", ",", "number", ",", "transaction", "=", "False", ")", ":", "try", ":", "self", ".", "_multi_lpop_pipeline", "(", "self", ",", "queue", ",", "number", ")", "except", ":", "raise" ]
Pops multiple elements from a list
[ "Pops", "multiple", "elements", "from", "a", "list" ]
train
https://github.com/stoneworksolutions/stoneredis/blob/46018b81317c152bd47c0764c3f1293379618af4/stoneredis/client.py#L319-L324
stoneworksolutions/stoneredis
stoneredis/client.py
StonePipeline.multi_rpush
def multi_rpush(self, queue, values, bulk_size=0, transaction=False): ''' Pushes multiple elements to a list ''' # Check that what we receive is iterable if hasattr(values, '__iter__'): self._multi_rpush_pipeline(self, queue, values, 0) else: raise ValueError('Expected an iterable')
python
def multi_rpush(self, queue, values, bulk_size=0, transaction=False): ''' Pushes multiple elements to a list ''' # Check that what we receive is iterable if hasattr(values, '__iter__'): self._multi_rpush_pipeline(self, queue, values, 0) else: raise ValueError('Expected an iterable')
[ "def", "multi_rpush", "(", "self", ",", "queue", ",", "values", ",", "bulk_size", "=", "0", ",", "transaction", "=", "False", ")", ":", "# Check that what we receive is iterable\r", "if", "hasattr", "(", "values", ",", "'__iter__'", ")", ":", "self", ".", "_...
Pushes multiple elements to a list
[ "Pushes", "multiple", "elements", "to", "a", "list" ]
train
https://github.com/stoneworksolutions/stoneredis/blob/46018b81317c152bd47c0764c3f1293379618af4/stoneredis/client.py#L326-L332
ShawnClake/Apitax
apitax/ah/api/util.py
_deserialize
def _deserialize(data, klass): """Deserializes dict, list, str into an object. :param data: dict, list or str. :param klass: class literal, or string of class name. :return: object. """ if data is None: return None if klass in six.integer_types or klass in (float, str, bool): return _deserialize_primitive(data, klass) elif klass == object: return _deserialize_object(data) elif klass == datetime.date: return deserialize_date(data) elif klass == datetime.datetime: return deserialize_datetime(data) elif type(klass) == typing.GenericMeta: if klass.__extra__ == list: return _deserialize_list(data, klass.__args__[0]) if klass.__extra__ == dict: return _deserialize_dict(data, klass.__args__[1]) else: return deserialize_model(data, klass)
python
def _deserialize(data, klass): """Deserializes dict, list, str into an object. :param data: dict, list or str. :param klass: class literal, or string of class name. :return: object. """ if data is None: return None if klass in six.integer_types or klass in (float, str, bool): return _deserialize_primitive(data, klass) elif klass == object: return _deserialize_object(data) elif klass == datetime.date: return deserialize_date(data) elif klass == datetime.datetime: return deserialize_datetime(data) elif type(klass) == typing.GenericMeta: if klass.__extra__ == list: return _deserialize_list(data, klass.__args__[0]) if klass.__extra__ == dict: return _deserialize_dict(data, klass.__args__[1]) else: return deserialize_model(data, klass)
[ "def", "_deserialize", "(", "data", ",", "klass", ")", ":", "if", "data", "is", "None", ":", "return", "None", "if", "klass", "in", "six", ".", "integer_types", "or", "klass", "in", "(", "float", ",", "str", ",", "bool", ")", ":", "return", "_deseria...
Deserializes dict, list, str into an object. :param data: dict, list or str. :param klass: class literal, or string of class name. :return: object.
[ "Deserializes", "dict", "list", "str", "into", "an", "object", "." ]
train
https://github.com/ShawnClake/Apitax/blob/2eb9c6990d4088b2503c7f13c2a76f8e59606e6d/apitax/ah/api/util.py#L7-L32
ShawnClake/Apitax
apitax/ah/api/util.py
_deserialize_primitive
def _deserialize_primitive(data, klass): """Deserializes to primitive type. :param data: data to deserialize. :param klass: class literal. :return: int, long, float, str, bool. :rtype: int | long | float | str | bool """ try: value = klass(data) except UnicodeEncodeError: value = six.u(data) except TypeError: value = data return value
python
def _deserialize_primitive(data, klass): """Deserializes to primitive type. :param data: data to deserialize. :param klass: class literal. :return: int, long, float, str, bool. :rtype: int | long | float | str | bool """ try: value = klass(data) except UnicodeEncodeError: value = six.u(data) except TypeError: value = data return value
[ "def", "_deserialize_primitive", "(", "data", ",", "klass", ")", ":", "try", ":", "value", "=", "klass", "(", "data", ")", "except", "UnicodeEncodeError", ":", "value", "=", "six", ".", "u", "(", "data", ")", "except", "TypeError", ":", "value", "=", "...
Deserializes to primitive type. :param data: data to deserialize. :param klass: class literal. :return: int, long, float, str, bool. :rtype: int | long | float | str | bool
[ "Deserializes", "to", "primitive", "type", "." ]
train
https://github.com/ShawnClake/Apitax/blob/2eb9c6990d4088b2503c7f13c2a76f8e59606e6d/apitax/ah/api/util.py#L35-L50
ShawnClake/Apitax
apitax/ah/api/util.py
_deserialize_dict
def _deserialize_dict(data, boxed_type): """Deserializes a dict and its elements. :param data: dict to deserialize. :type data: dict :param boxed_type: class literal. :return: deserialized dict. :rtype: dict """ return {k: _deserialize(v, boxed_type) for k, v in six.iteritems(data)}
python
def _deserialize_dict(data, boxed_type): """Deserializes a dict and its elements. :param data: dict to deserialize. :type data: dict :param boxed_type: class literal. :return: deserialized dict. :rtype: dict """ return {k: _deserialize(v, boxed_type) for k, v in six.iteritems(data)}
[ "def", "_deserialize_dict", "(", "data", ",", "boxed_type", ")", ":", "return", "{", "k", ":", "_deserialize", "(", "v", ",", "boxed_type", ")", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "data", ")", "}" ]
Deserializes a dict and its elements. :param data: dict to deserialize. :type data: dict :param boxed_type: class literal. :return: deserialized dict. :rtype: dict
[ "Deserializes", "a", "dict", "and", "its", "elements", "." ]
train
https://github.com/ShawnClake/Apitax/blob/2eb9c6990d4088b2503c7f13c2a76f8e59606e6d/apitax/ah/api/util.py#L130-L141
fredericklussier/ObservablePy
observablePy/ObserverStore.py
ObserverStore.remove
def remove(self, what, call): """ remove an observer what: (string | array) state fields to observe call: (function) when not given, decorator usage is assumed. The call function should have 2 parameters: - previousValue, - actualValue """ type = observerTypeEnum.typeOf(what) self._observers.remove({ "observing": what, "type": type, "call": call })
python
def remove(self, what, call): """ remove an observer what: (string | array) state fields to observe call: (function) when not given, decorator usage is assumed. The call function should have 2 parameters: - previousValue, - actualValue """ type = observerTypeEnum.typeOf(what) self._observers.remove({ "observing": what, "type": type, "call": call })
[ "def", "remove", "(", "self", ",", "what", ",", "call", ")", ":", "type", "=", "observerTypeEnum", ".", "typeOf", "(", "what", ")", "self", ".", "_observers", ".", "remove", "(", "{", "\"observing\"", ":", "what", ",", "\"type\"", ":", "type", ",", "...
remove an observer what: (string | array) state fields to observe call: (function) when not given, decorator usage is assumed. The call function should have 2 parameters: - previousValue, - actualValue
[ "remove", "an", "observer" ]
train
https://github.com/fredericklussier/ObservablePy/blob/fd7926a0568621f80b1d567d18f199976f1fa4e8/observablePy/ObserverStore.py#L31-L47
fredericklussier/ObservablePy
observablePy/ObserverStore.py
ObserverStore.getObservers
def getObservers(self): """ Get the list of observer to the instance of the class. :return: Subscribed Obversers. :rtype: Array """ result = [] for observer in self._observers: result.append( { "observing": observer["observing"], "call": observer["call"] }) return result
python
def getObservers(self): """ Get the list of observer to the instance of the class. :return: Subscribed Obversers. :rtype: Array """ result = [] for observer in self._observers: result.append( { "observing": observer["observing"], "call": observer["call"] }) return result
[ "def", "getObservers", "(", "self", ")", ":", "result", "=", "[", "]", "for", "observer", "in", "self", ".", "_observers", ":", "result", ".", "append", "(", "{", "\"observing\"", ":", "observer", "[", "\"observing\"", "]", ",", "\"call\"", ":", "observe...
Get the list of observer to the instance of the class. :return: Subscribed Obversers. :rtype: Array
[ "Get", "the", "list", "of", "observer", "to", "the", "instance", "of", "the", "class", "." ]
train
https://github.com/fredericklussier/ObservablePy/blob/fd7926a0568621f80b1d567d18f199976f1fa4e8/observablePy/ObserverStore.py#L55-L69
bioidiap/gridtk
gridtk/models.py
add_job
def add_job(session, command_line, name = 'job', dependencies = [], array = None, exec_dir=None, log_dir = None, stop_on_failure = False, **kwargs): """Helper function to create a job, add the dependencies and the array jobs.""" job = Job(command_line=command_line, name=name, exec_dir=exec_dir, log_dir=log_dir, array_string=array, stop_on_failure=stop_on_failure, kwargs=kwargs) session.add(job) session.flush() session.refresh(job) # by default id and unique id are identical, but the id might be overwritten later on job.id = job.unique for d in dependencies: if d == job.unique: logger.warn("Adding self-dependency of job %d is not allowed" % d) continue depending = list(session.query(Job).filter(Job.unique == d)) if len(depending): session.add(JobDependence(job.unique, depending[0].unique)) else: logger.warn("Could not find dependent job with id %d in database" % d) if array: (start, stop, step) = array # add array jobs for i in range(start, stop+1, step): session.add(ArrayJob(i, job.unique)) session.commit() return job
python
def add_job(session, command_line, name = 'job', dependencies = [], array = None, exec_dir=None, log_dir = None, stop_on_failure = False, **kwargs): """Helper function to create a job, add the dependencies and the array jobs.""" job = Job(command_line=command_line, name=name, exec_dir=exec_dir, log_dir=log_dir, array_string=array, stop_on_failure=stop_on_failure, kwargs=kwargs) session.add(job) session.flush() session.refresh(job) # by default id and unique id are identical, but the id might be overwritten later on job.id = job.unique for d in dependencies: if d == job.unique: logger.warn("Adding self-dependency of job %d is not allowed" % d) continue depending = list(session.query(Job).filter(Job.unique == d)) if len(depending): session.add(JobDependence(job.unique, depending[0].unique)) else: logger.warn("Could not find dependent job with id %d in database" % d) if array: (start, stop, step) = array # add array jobs for i in range(start, stop+1, step): session.add(ArrayJob(i, job.unique)) session.commit() return job
[ "def", "add_job", "(", "session", ",", "command_line", ",", "name", "=", "'job'", ",", "dependencies", "=", "[", "]", ",", "array", "=", "None", ",", "exec_dir", "=", "None", ",", "log_dir", "=", "None", ",", "stop_on_failure", "=", "False", ",", "*", ...
Helper function to create a job, add the dependencies and the array jobs.
[ "Helper", "function", "to", "create", "a", "job", "add", "the", "dependencies", "and", "the", "array", "jobs", "." ]
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/models.py#L359-L388
bioidiap/gridtk
gridtk/models.py
times
def times(job): """Returns a string containing timing information for teh given job, which might be a :py:class:`Job` or an :py:class:`ArrayJob`.""" timing = "Submitted: %s" % job.submit_time.ctime() if job.start_time is not None: timing += "\nStarted : %s \t Job waited : %s" % (job.start_time.ctime(), job.start_time - job.submit_time) if job.finish_time is not None: timing += "\nFinished : %s \t Job executed: %s" % (job.finish_time.ctime(), job.finish_time - job.start_time) return timing
python
def times(job): """Returns a string containing timing information for teh given job, which might be a :py:class:`Job` or an :py:class:`ArrayJob`.""" timing = "Submitted: %s" % job.submit_time.ctime() if job.start_time is not None: timing += "\nStarted : %s \t Job waited : %s" % (job.start_time.ctime(), job.start_time - job.submit_time) if job.finish_time is not None: timing += "\nFinished : %s \t Job executed: %s" % (job.finish_time.ctime(), job.finish_time - job.start_time) return timing
[ "def", "times", "(", "job", ")", ":", "timing", "=", "\"Submitted: %s\"", "%", "job", ".", "submit_time", ".", "ctime", "(", ")", "if", "job", ".", "start_time", "is", "not", "None", ":", "timing", "+=", "\"\\nStarted : %s \\t Job waited : %s\"", "%", "(",...
Returns a string containing timing information for teh given job, which might be a :py:class:`Job` or an :py:class:`ArrayJob`.
[ "Returns", "a", "string", "containing", "timing", "information", "for", "teh", "given", "job", "which", "might", "be", "a", ":", "py", ":", "class", ":", "Job", "or", "an", ":", "py", ":", "class", ":", "ArrayJob", "." ]
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/models.py#L390-L397
bioidiap/gridtk
gridtk/models.py
ArrayJob.format
def format(self, format): """Formats the current job into a nicer string to fit into a table.""" job_id = "%d - %d" % (self.job.id, self.id) queue = self.job.queue_name if self.machine_name is None else self.machine_name status = "%s" % self.status + (" (%d)" % self.result if self.result is not None else "" ) return format.format("", job_id, queue, status)
python
def format(self, format): """Formats the current job into a nicer string to fit into a table.""" job_id = "%d - %d" % (self.job.id, self.id) queue = self.job.queue_name if self.machine_name is None else self.machine_name status = "%s" % self.status + (" (%d)" % self.result if self.result is not None else "" ) return format.format("", job_id, queue, status)
[ "def", "format", "(", "self", ",", "format", ")", ":", "job_id", "=", "\"%d - %d\"", "%", "(", "self", ".", "job", ".", "id", ",", "self", ".", "id", ")", "queue", "=", "self", ".", "job", ".", "queue_name", "if", "self", ".", "machine_name", "is",...
Formats the current job into a nicer string to fit into a table.
[ "Formats", "the", "current", "job", "into", "a", "nicer", "string", "to", "fit", "into", "a", "table", "." ]
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/models.py#L63-L70
bioidiap/gridtk
gridtk/models.py
Job.submit
def submit(self, new_queue = None): """Sets the status of this job to 'submitted'.""" self.status = 'submitted' self.result = None self.machine_name = None if new_queue is not None: self.queue_name = new_queue for array_job in self.array: array_job.status = 'submitted' array_job.result = None array_job.machine_name = None self.submit_time = datetime.now() self.start_time = None self.finish_time = None
python
def submit(self, new_queue = None): """Sets the status of this job to 'submitted'.""" self.status = 'submitted' self.result = None self.machine_name = None if new_queue is not None: self.queue_name = new_queue for array_job in self.array: array_job.status = 'submitted' array_job.result = None array_job.machine_name = None self.submit_time = datetime.now() self.start_time = None self.finish_time = None
[ "def", "submit", "(", "self", ",", "new_queue", "=", "None", ")", ":", "self", ".", "status", "=", "'submitted'", "self", ".", "result", "=", "None", "self", ".", "machine_name", "=", "None", "if", "new_queue", "is", "not", "None", ":", "self", ".", ...
Sets the status of this job to 'submitted'.
[ "Sets", "the", "status", "of", "this", "job", "to", "submitted", "." ]
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/models.py#L111-L124
bioidiap/gridtk
gridtk/models.py
Job.queue
def queue(self, new_job_id = None, new_job_name = None, queue_name = None): """Sets the status of this job to 'queued' or 'waiting'.""" # update the job id (i.e., when the job is executed in the grid) if new_job_id is not None: self.id = new_job_id if new_job_name is not None: self.name = new_job_name if queue_name is not None: self.queue_name = queue_name new_status = 'queued' self.result = None # check if we have to wait for another job to finish for job in self.get_jobs_we_wait_for(): if job.status not in ('success', 'failure'): new_status = 'waiting' elif self.stop_on_failure and job.status == 'failure': new_status = 'failure' # reset the queued jobs that depend on us to waiting status for job in self.get_jobs_waiting_for_us(): if job.status == 'queued': job.status = 'failure' if new_status == 'failure' else 'waiting' self.status = new_status for array_job in self.array: if array_job.status not in ('success', 'failure'): array_job.status = new_status
python
def queue(self, new_job_id = None, new_job_name = None, queue_name = None): """Sets the status of this job to 'queued' or 'waiting'.""" # update the job id (i.e., when the job is executed in the grid) if new_job_id is not None: self.id = new_job_id if new_job_name is not None: self.name = new_job_name if queue_name is not None: self.queue_name = queue_name new_status = 'queued' self.result = None # check if we have to wait for another job to finish for job in self.get_jobs_we_wait_for(): if job.status not in ('success', 'failure'): new_status = 'waiting' elif self.stop_on_failure and job.status == 'failure': new_status = 'failure' # reset the queued jobs that depend on us to waiting status for job in self.get_jobs_waiting_for_us(): if job.status == 'queued': job.status = 'failure' if new_status == 'failure' else 'waiting' self.status = new_status for array_job in self.array: if array_job.status not in ('success', 'failure'): array_job.status = new_status
[ "def", "queue", "(", "self", ",", "new_job_id", "=", "None", ",", "new_job_name", "=", "None", ",", "queue_name", "=", "None", ")", ":", "# update the job id (i.e., when the job is executed in the grid)", "if", "new_job_id", "is", "not", "None", ":", "self", ".", ...
Sets the status of this job to 'queued' or 'waiting'.
[ "Sets", "the", "status", "of", "this", "job", "to", "queued", "or", "waiting", "." ]
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/models.py#L127-L156
bioidiap/gridtk
gridtk/models.py
Job.execute
def execute(self, array_id = None, machine_name = None): """Sets the status of this job to 'executing'.""" self.status = 'executing' if array_id is not None: for array_job in self.array: if array_job.id == array_id: array_job.status = 'executing' if machine_name is not None: array_job.machine_name = machine_name array_job.start_time = datetime.now() elif machine_name is not None: self.machine_name = machine_name if self.start_time is None: self.start_time = datetime.now() # sometimes, the 'finish' command did not work for array jobs, # so check if any old job still has the 'executing' flag set for job in self.get_jobs_we_wait_for(): if job.array and job.status == 'executing': job.finish(0, -1)
python
def execute(self, array_id = None, machine_name = None): """Sets the status of this job to 'executing'.""" self.status = 'executing' if array_id is not None: for array_job in self.array: if array_job.id == array_id: array_job.status = 'executing' if machine_name is not None: array_job.machine_name = machine_name array_job.start_time = datetime.now() elif machine_name is not None: self.machine_name = machine_name if self.start_time is None: self.start_time = datetime.now() # sometimes, the 'finish' command did not work for array jobs, # so check if any old job still has the 'executing' flag set for job in self.get_jobs_we_wait_for(): if job.array and job.status == 'executing': job.finish(0, -1)
[ "def", "execute", "(", "self", ",", "array_id", "=", "None", ",", "machine_name", "=", "None", ")", ":", "self", ".", "status", "=", "'executing'", "if", "array_id", "is", "not", "None", ":", "for", "array_job", "in", "self", ".", "array", ":", "if", ...
Sets the status of this job to 'executing'.
[ "Sets", "the", "status", "of", "this", "job", "to", "executing", "." ]
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/models.py#L159-L178
bioidiap/gridtk
gridtk/models.py
Job.finish
def finish(self, result, array_id = None): """Sets the status of this job to 'success' or 'failure'.""" # check if there is any array job still running new_status = 'success' if result == 0 else 'failure' new_result = result finished = True if array_id is not None: for array_job in self.array: if array_job.id == array_id: array_job.status = new_status array_job.result = result array_job.finish_time = datetime.now() if array_job.status not in ('success', 'failure'): finished = False elif new_result == 0: new_result = array_job.result if finished: # There was no array job, or all array jobs finished self.status = 'success' if new_result == 0 else 'failure' self.result = new_result self.finish_time = datetime.now() # update all waiting jobs for job in self.get_jobs_waiting_for_us(): if job.status == 'waiting': job.queue()
python
def finish(self, result, array_id = None): """Sets the status of this job to 'success' or 'failure'.""" # check if there is any array job still running new_status = 'success' if result == 0 else 'failure' new_result = result finished = True if array_id is not None: for array_job in self.array: if array_job.id == array_id: array_job.status = new_status array_job.result = result array_job.finish_time = datetime.now() if array_job.status not in ('success', 'failure'): finished = False elif new_result == 0: new_result = array_job.result if finished: # There was no array job, or all array jobs finished self.status = 'success' if new_result == 0 else 'failure' self.result = new_result self.finish_time = datetime.now() # update all waiting jobs for job in self.get_jobs_waiting_for_us(): if job.status == 'waiting': job.queue()
[ "def", "finish", "(", "self", ",", "result", ",", "array_id", "=", "None", ")", ":", "# check if there is any array job still running", "new_status", "=", "'success'", "if", "result", "==", "0", "else", "'failure'", "new_result", "=", "result", "finished", "=", ...
Sets the status of this job to 'success' or 'failure'.
[ "Sets", "the", "status", "of", "this", "job", "to", "success", "or", "failure", "." ]
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/models.py#L181-L207
bioidiap/gridtk
gridtk/models.py
Job.refresh
def refresh(self): """Refreshes the status information.""" if self.status == 'executing' and self.array: new_result = 0 for array_job in self.array: if array_job.status == 'failure' and new_result is not None: new_result = array_job.result elif array_job.status not in ('success', 'failure'): new_result = None if new_result is not None: self.status = 'success' if new_result == 0 else 'failure' self.result = new_result
python
def refresh(self): """Refreshes the status information.""" if self.status == 'executing' and self.array: new_result = 0 for array_job in self.array: if array_job.status == 'failure' and new_result is not None: new_result = array_job.result elif array_job.status not in ('success', 'failure'): new_result = None if new_result is not None: self.status = 'success' if new_result == 0 else 'failure' self.result = new_result
[ "def", "refresh", "(", "self", ")", ":", "if", "self", ".", "status", "==", "'executing'", "and", "self", ".", "array", ":", "new_result", "=", "0", "for", "array_job", "in", "self", ".", "array", ":", "if", "array_job", ".", "status", "==", "'failure'...
Refreshes the status information.
[ "Refreshes", "the", "status", "information", "." ]
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/models.py#L210-L221
bioidiap/gridtk
gridtk/models.py
Job.get_command_line
def get_command_line(self): """Returns the command line for the job.""" # In python 2, the command line is unicode, which needs to be converted to string before pickling; # In python 3, the command line is bytes, which can be pickled directly return loads(self.command_line) if isinstance(self.command_line, bytes) else loads(self.command_line.encode())
python
def get_command_line(self): """Returns the command line for the job.""" # In python 2, the command line is unicode, which needs to be converted to string before pickling; # In python 3, the command line is bytes, which can be pickled directly return loads(self.command_line) if isinstance(self.command_line, bytes) else loads(self.command_line.encode())
[ "def", "get_command_line", "(", "self", ")", ":", "# In python 2, the command line is unicode, which needs to be converted to string before pickling;", "# In python 3, the command line is bytes, which can be pickled directly", "return", "loads", "(", "self", ".", "command_line", ")", "...
Returns the command line for the job.
[ "Returns", "the", "command", "line", "for", "the", "job", "." ]
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/models.py#L224-L228
bioidiap/gridtk
gridtk/models.py
Job.get_exec_dir
def get_exec_dir(self): """Returns the command line for the job.""" # In python 2, the command line is unicode, which needs to be converted to string before pickling; # In python 3, the command line is bytes, which can be pickled directly return str(os.path.realpath(self.exec_dir)) if self.exec_dir is not None else None
python
def get_exec_dir(self): """Returns the command line for the job.""" # In python 2, the command line is unicode, which needs to be converted to string before pickling; # In python 3, the command line is bytes, which can be pickled directly return str(os.path.realpath(self.exec_dir)) if self.exec_dir is not None else None
[ "def", "get_exec_dir", "(", "self", ")", ":", "# In python 2, the command line is unicode, which needs to be converted to string before pickling;", "# In python 3, the command line is bytes, which can be pickled directly", "return", "str", "(", "os", ".", "path", ".", "realpath", "("...
Returns the command line for the job.
[ "Returns", "the", "command", "line", "for", "the", "job", "." ]
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/models.py#L234-L238
bioidiap/gridtk
gridtk/models.py
Job.get_array
def get_array(self): """Returns the array arguments for the job; usually a string.""" # In python 2, the command line is unicode, which needs to be converted to string before pickling; # In python 3, the command line is bytes, which can be pickled directly return loads(self.array_string) if isinstance(self.array_string, bytes) else loads(self.array_string.encode())
python
def get_array(self): """Returns the array arguments for the job; usually a string.""" # In python 2, the command line is unicode, which needs to be converted to string before pickling; # In python 3, the command line is bytes, which can be pickled directly return loads(self.array_string) if isinstance(self.array_string, bytes) else loads(self.array_string.encode())
[ "def", "get_array", "(", "self", ")", ":", "# In python 2, the command line is unicode, which needs to be converted to string before pickling;", "# In python 3, the command line is bytes, which can be pickled directly", "return", "loads", "(", "self", ".", "array_string", ")", "if", ...
Returns the array arguments for the job; usually a string.
[ "Returns", "the", "array", "arguments", "for", "the", "job", ";", "usually", "a", "string", "." ]
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/models.py#L242-L246
bioidiap/gridtk
gridtk/models.py
Job.get_arguments
def get_arguments(self): """Returns the additional options for the grid (such as the queue, memory requirements, ...).""" # In python 2, the command line is unicode, which needs to be converted to string before pickling; # In python 3, the command line is bytes, which can be pickled directly args = loads(self.grid_arguments)['kwargs'] if isinstance(self.grid_arguments, bytes) else loads(self.grid_arguments.encode())['kwargs'] # in any case, the commands have to be converted to str retval = {} if 'pe_opt' in args: retval['pe_opt'] = args['pe_opt'] if 'memfree' in args and args['memfree'] is not None: retval['memfree'] = args['memfree'] if 'hvmem' in args and args['hvmem'] is not None: retval['hvmem'] = args['hvmem'] if 'gpumem' in args and args['gpumem'] is not None: retval['gpumem'] = args['gpumem'] if 'env' in args and len(args['env']) > 0: retval['env'] = args['env'] if 'io_big' in args and args['io_big']: retval['io_big'] = True # also add the queue if self.queue_name is not None: retval['queue'] = str(self.queue_name) return retval
python
def get_arguments(self): """Returns the additional options for the grid (such as the queue, memory requirements, ...).""" # In python 2, the command line is unicode, which needs to be converted to string before pickling; # In python 3, the command line is bytes, which can be pickled directly args = loads(self.grid_arguments)['kwargs'] if isinstance(self.grid_arguments, bytes) else loads(self.grid_arguments.encode())['kwargs'] # in any case, the commands have to be converted to str retval = {} if 'pe_opt' in args: retval['pe_opt'] = args['pe_opt'] if 'memfree' in args and args['memfree'] is not None: retval['memfree'] = args['memfree'] if 'hvmem' in args and args['hvmem'] is not None: retval['hvmem'] = args['hvmem'] if 'gpumem' in args and args['gpumem'] is not None: retval['gpumem'] = args['gpumem'] if 'env' in args and len(args['env']) > 0: retval['env'] = args['env'] if 'io_big' in args and args['io_big']: retval['io_big'] = True # also add the queue if self.queue_name is not None: retval['queue'] = str(self.queue_name) return retval
[ "def", "get_arguments", "(", "self", ")", ":", "# In python 2, the command line is unicode, which needs to be converted to string before pickling;", "# In python 3, the command line is bytes, which can be pickled directly", "args", "=", "loads", "(", "self", ".", "grid_arguments", ")",...
Returns the additional options for the grid (such as the queue, memory requirements, ...).
[ "Returns", "the", "additional", "options", "for", "the", "grid", "(", "such", "as", "the", "queue", "memory", "requirements", "...", ")", "." ]
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/models.py#L249-L273
bioidiap/gridtk
gridtk/models.py
Job.format
def format(self, format, dependencies = 0, limit_command_line = None): """Formats the current job into a nicer string to fit into a table.""" command_line = self._cmdline() if limit_command_line is not None and len(command_line) > limit_command_line: command_line = command_line[:limit_command_line-3] + '...' job_id = "%d" % self.id + (" [%d-%d:%d]" % self.get_array() if self.array else "") status = "%s" % self.status + (" (%d)" % self.result if self.result is not None else "" ) queue = self.queue_name if self.machine_name is None else self.machine_name if limit_command_line is None: grid_opt = self.get_arguments() if grid_opt: # add additional information about the job at the end command_line = "<" + ",".join(["%s=%s" % (key,value) for key,value in grid_opt.items()]) + ">: " + command_line if self.exec_dir is not None: command_line += "; [Executed in directory: '%s']" % self.exec_dir if dependencies: deps = str(sorted(list(set([dep.unique for dep in self.get_jobs_we_wait_for()])))) if dependencies < len(deps): deps = deps[:dependencies-3] + '...' return format.format(self.unique, job_id, queue[:12], status, self.name, deps, command_line) else: return format.format(self.unique, job_id, queue[:12], status, self.name, command_line)
python
def format(self, format, dependencies = 0, limit_command_line = None): """Formats the current job into a nicer string to fit into a table.""" command_line = self._cmdline() if limit_command_line is not None and len(command_line) > limit_command_line: command_line = command_line[:limit_command_line-3] + '...' job_id = "%d" % self.id + (" [%d-%d:%d]" % self.get_array() if self.array else "") status = "%s" % self.status + (" (%d)" % self.result if self.result is not None else "" ) queue = self.queue_name if self.machine_name is None else self.machine_name if limit_command_line is None: grid_opt = self.get_arguments() if grid_opt: # add additional information about the job at the end command_line = "<" + ",".join(["%s=%s" % (key,value) for key,value in grid_opt.items()]) + ">: " + command_line if self.exec_dir is not None: command_line += "; [Executed in directory: '%s']" % self.exec_dir if dependencies: deps = str(sorted(list(set([dep.unique for dep in self.get_jobs_we_wait_for()])))) if dependencies < len(deps): deps = deps[:dependencies-3] + '...' return format.format(self.unique, job_id, queue[:12], status, self.name, deps, command_line) else: return format.format(self.unique, job_id, queue[:12], status, self.name, command_line)
[ "def", "format", "(", "self", ",", "format", ",", "dependencies", "=", "0", ",", "limit_command_line", "=", "None", ")", ":", "command_line", "=", "self", ".", "_cmdline", "(", ")", "if", "limit_command_line", "is", "not", "None", "and", "len", "(", "com...
Formats the current job into a nicer string to fit into a table.
[ "Formats", "the", "current", "job", "into", "a", "nicer", "string", "to", "fit", "into", "a", "table", "." ]
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/models.py#L314-L337
henzk/ape
ape/container_mode/utils.py
get_repo_name
def get_repo_name(repo_dir): """ Takes a directory (which must be a git repo) and returns the repository name, derived from remote.origin.url; <domain>/foo/bar.git => bar :param repo_dir: path of the directory :return: string """ repo = git.Repo(repo_dir) url = repo.remotes.origin.url return url.split('/')[-1].split('.git')[0]
python
def get_repo_name(repo_dir): """ Takes a directory (which must be a git repo) and returns the repository name, derived from remote.origin.url; <domain>/foo/bar.git => bar :param repo_dir: path of the directory :return: string """ repo = git.Repo(repo_dir) url = repo.remotes.origin.url return url.split('/')[-1].split('.git')[0]
[ "def", "get_repo_name", "(", "repo_dir", ")", ":", "repo", "=", "git", ".", "Repo", "(", "repo_dir", ")", "url", "=", "repo", ".", "remotes", ".", "origin", ".", "url", "return", "url", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", ".", "spl...
Takes a directory (which must be a git repo) and returns the repository name, derived from remote.origin.url; <domain>/foo/bar.git => bar :param repo_dir: path of the directory :return: string
[ "Takes", "a", "directory", "(", "which", "must", "be", "a", "git", "repo", ")", "and", "returns", "the", "repository", "name", "derived", "from", "remote", ".", "origin", ".", "url", ";", "<domain", ">", "/", "foo", "/", "bar", ".", "git", "=", ">", ...
train
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/container_mode/utils.py#L7-L18
henzk/ape
ape/container_mode/utils.py
extract_feature_order_from_model_xml
def extract_feature_order_from_model_xml(file_path): """ Takes the path to a FeatureIDE model.xml file and extracts the feature order. :param file_path: path to the model file :return: list of features as strings """ root = xml.etree.ElementTree.parse(file_path).getroot() feature_order_node = root.find('featureOrder') features = [] for child in feature_order_node: features.append(child.attrib['name']) return features
python
def extract_feature_order_from_model_xml(file_path): """ Takes the path to a FeatureIDE model.xml file and extracts the feature order. :param file_path: path to the model file :return: list of features as strings """ root = xml.etree.ElementTree.parse(file_path).getroot() feature_order_node = root.find('featureOrder') features = [] for child in feature_order_node: features.append(child.attrib['name']) return features
[ "def", "extract_feature_order_from_model_xml", "(", "file_path", ")", ":", "root", "=", "xml", ".", "etree", ".", "ElementTree", ".", "parse", "(", "file_path", ")", ".", "getroot", "(", ")", "feature_order_node", "=", "root", ".", "find", "(", "'featureOrder'...
Takes the path to a FeatureIDE model.xml file and extracts the feature order. :param file_path: path to the model file :return: list of features as strings
[ "Takes", "the", "path", "to", "a", "FeatureIDE", "model", ".", "xml", "file", "and", "extracts", "the", "feature", "order", ".", ":", "param", "file_path", ":", "path", "to", "the", "model", "file", ":", "return", ":", "list", "of", "features", "as", "...
train
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/container_mode/utils.py#L21-L35
henzk/ape
ape/container_mode/utils.py
get_feature_order_constraints
def get_feature_order_constraints(container_dir): """ Returns the feature order constraints dict defined in featuremodel/productline/feature_order.json :param container_dir: the container dir. :return: dict """ import json file_path = os.path.join(container_dir, '_lib/featuremodel/productline/feature_order.json') with open(file_path, 'r') as f: ordering_constraints = json.loads(f.read()) return ordering_constraints
python
def get_feature_order_constraints(container_dir): """ Returns the feature order constraints dict defined in featuremodel/productline/feature_order.json :param container_dir: the container dir. :return: dict """ import json file_path = os.path.join(container_dir, '_lib/featuremodel/productline/feature_order.json') with open(file_path, 'r') as f: ordering_constraints = json.loads(f.read()) return ordering_constraints
[ "def", "get_feature_order_constraints", "(", "container_dir", ")", ":", "import", "json", "file_path", "=", "os", ".", "path", ".", "join", "(", "container_dir", ",", "'_lib/featuremodel/productline/feature_order.json'", ")", "with", "open", "(", "file_path", ",", "...
Returns the feature order constraints dict defined in featuremodel/productline/feature_order.json :param container_dir: the container dir. :return: dict
[ "Returns", "the", "feature", "order", "constraints", "dict", "defined", "in", "featuremodel", "/", "productline", "/", "feature_order", ".", "json", ":", "param", "container_dir", ":", "the", "container", "dir", ".", ":", "return", ":", "dict" ]
train
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/container_mode/utils.py#L38-L50