repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
ianclegg/ntlmlib
ntlmlib/security.py
_Ntlm2Session.verify
def verify(self, message, signature):
    """
    Verifies the signature attached to the supplied message using NTLM2
    Session Security.

    :param message: The message whose signature will be verified
    :param signature: The packed NTLM2 message signature for the message
    :return: True if the signature is valid
    :raises Exception: if the message arrived out of sequence or the
        checksum does not match (i.e. the message was altered)
    """
    # Parse the signature header
    mac = _Ntlm2MessageSignature()
    mac.from_string(signature)

    # Validate the sequence number to detect replayed/reordered messages
    if mac['sequence'] != self.incoming_sequence:
        raise Exception("The message was not received in the correct sequence.")

    # Extract the supplied checksum; when a session key was negotiated the
    # checksum itself is sealed and must be run through the incoming cipher
    checksum = struct.pack('<q', mac['checksum'])
    if self.key_exchange:
        checksum = self.incoming_seal.update(checksum)

    # Calculate the expected checksum: HMAC over sequence-number + message,
    # truncated to 8 bytes. NOTE: hmac.new() defaults to MD5 here, which is
    # what the NTLM2 session security scheme requires.
    hmac_context = hmac.new(self.incoming_signing_key)
    hmac_context.update(struct.pack('<i', self.incoming_sequence) + message)
    expected_checksum = hmac_context.digest()[:8]

    # Constant-time comparison: '!=' on secrets leaks timing information an
    # attacker could use to forge checksums byte by byte
    if not hmac.compare_digest(checksum, expected_checksum):
        raise Exception("The message has been altered")

    self.incoming_sequence += 1
    # The docstring (and GSS verify semantics) promise a truthy result on
    # success; the original fell off the end and returned None.
    return True
python
def verify(self, message, signature): """ Verified the signature attached to the supplied message using NTLM2 Session Security :param message: The message whose signature will verified :return: True if the signature is valid, otherwise False """ # Parse the signature header mac = _Ntlm2MessageSignature() mac.from_string(signature) # validate the sequence if mac['sequence'] != self.incoming_sequence: raise Exception("The message was not received in the correct sequence.") # extract the supplied checksum checksum = struct.pack('<q', mac['checksum']) if self.key_exchange: checksum = self.incoming_seal.update(checksum) # calculate the expected checksum for the message hmac_context = hmac.new(self.incoming_signing_key) hmac_context.update(struct.pack('<i', self.incoming_sequence) + message) expected_checksum = hmac_context.digest()[:8] # validate the supplied checksum is correct if checksum != expected_checksum: raise Exception("The message has been altered") #logger.debug("Verify Sequence Number: %s", AsHex(self.outgoing_sequence)) self.incoming_sequence += 1
[ "def", "verify", "(", "self", ",", "message", ",", "signature", ")", ":", "# Parse the signature header", "mac", "=", "_Ntlm2MessageSignature", "(", ")", "mac", ".", "from_string", "(", "signature", ")", "# validate the sequence", "if", "mac", "[", "'sequence'", ...
Verified the signature attached to the supplied message using NTLM2 Session Security :param message: The message whose signature will verified :return: True if the signature is valid, otherwise False
[ "Verified", "the", "signature", "attached", "to", "the", "supplied", "message", "using", "NTLM2", "Session", "Security", ":", "param", "message", ":", "The", "message", "whose", "signature", "will", "verified", ":", "return", ":", "True", "if", "the", "signatu...
train
https://github.com/ianclegg/ntlmlib/blob/49eadfe4701bcce84a4ca9cbab5b6d5d72eaad05/ntlmlib/security.py#L290-L319
ianclegg/ntlmlib
ntlmlib/security.py
Ntlm2Sealing.wrap
def wrap(self, message):
    """
    NTLM GSSWrap(): seal a message and compute its accompanying signature.

    :param message: The message to be encrypted
    :return: A tuple of (encrypted message, signature)
    """
    sealed_message = _Ntlm2Session.encrypt(self, message)
    message_signature = _Ntlm2Session.sign(self, message)
    return sealed_message, message_signature
python
def wrap(self, message): """ NTM GSSwrap() :param message: The message to be encrypted :return: A Tuple containing the signature and the encrypted messaging """ cipher_text = _Ntlm2Session.encrypt(self, message) signature = _Ntlm2Session.sign(self, message) return cipher_text, signature
[ "def", "wrap", "(", "self", ",", "message", ")", ":", "cipher_text", "=", "_Ntlm2Session", ".", "encrypt", "(", "self", ",", "message", ")", "signature", "=", "_Ntlm2Session", ".", "sign", "(", "self", ",", "message", ")", "return", "cipher_text", ",", "...
NTM GSSwrap() :param message: The message to be encrypted :return: A Tuple containing the signature and the encrypted messaging
[ "NTM", "GSSwrap", "()", ":", "param", "message", ":", "The", "message", "to", "be", "encrypted", ":", "return", ":", "A", "Tuple", "containing", "the", "signature", "and", "the", "encrypted", "messaging" ]
train
https://github.com/ianclegg/ntlmlib/blob/49eadfe4701bcce84a4ca9cbab5b6d5d72eaad05/ntlmlib/security.py#L356-L364
ianclegg/ntlmlib
ntlmlib/security.py
Ntlm2Sealing.unwrap
def unwrap(self, message, signature):
    """
    NTLM GSSUnwrap(): unseal a message and verify its signature.

    :param message: The message to be decrypted
    :param signature: The signature that accompanied the sealed message
    :return: The decrypted message
    """
    recovered = _Ntlm2Session.decrypt(self, message)
    # Verification raises on tampering/out-of-sequence delivery
    _Ntlm2Session.verify(self, recovered, signature)
    return recovered
python
def unwrap(self, message, signature): """ NTLM GSSUnwrap() :param message: The message to be decrypted :return: The decrypted message """ plain_text = _Ntlm2Session.decrypt(self, message) _Ntlm2Session.verify(self, plain_text, signature) return plain_text
[ "def", "unwrap", "(", "self", ",", "message", ",", "signature", ")", ":", "plain_text", "=", "_Ntlm2Session", ".", "decrypt", "(", "self", ",", "message", ")", "_Ntlm2Session", ".", "verify", "(", "self", ",", "plain_text", ",", "signature", ")", "return",...
NTLM GSSUnwrap() :param message: The message to be decrypted :return: The decrypted message
[ "NTLM", "GSSUnwrap", "()", ":", "param", "message", ":", "The", "message", "to", "be", "decrypted", ":", "return", ":", "The", "decrypted", "message" ]
train
https://github.com/ianclegg/ntlmlib/blob/49eadfe4701bcce84a4ca9cbab5b6d5d72eaad05/ntlmlib/security.py#L366-L374
humangeo/rawes
rawes/connection_pool.py
ConnectionPool.mark_dead
def mark_dead(self, connection, now=None):
    """
    Mark the connection as dead (failed): remove it from the live pool and
    park it in the dead queue with an exponentially growing retry timeout.

    :arg connection: the failed instance
    :arg now: current-time override (injected by tests)
    """
    current_time = now or time.time()
    try:
        self.connections.remove(connection)
    except ValueError:
        # Not in the live pool: either never alive or another thread
        # already marked it dead - nothing left to do.
        return
    failures = self.dead_count.get(connection, 0) + 1
    self.dead_count[connection] = failures
    # Exponential back-off, capped at timeout_cutoff doublings
    retry_timeout = self.dead_timeout * 2 ** min(failures - 1, self.timeout_cutoff)
    self.dead.put((current_time + retry_timeout, connection))
    logger.warning(
        'Connection %r has failed for %i times in a row,'
        ' putting on %i second timeout.',
        connection, failures, retry_timeout
    )
python
def mark_dead(self, connection, now=None): """ Mark the connection as dead (failed). Remove it from the live pool and put it on a timeout. :arg connection: the failed instance """ # allow inject for testing purposes now = now if now else time.time() try: self.connections.remove(connection) except ValueError: # connection not alive or another thread marked it already, ignore return else: dead_count = self.dead_count.get(connection, 0) + 1 self.dead_count[connection] = dead_count timeout = self.dead_timeout * 2 ** min(dead_count - 1, self.timeout_cutoff) self.dead.put((now + timeout, connection)) logger.warning( 'Connection %r has failed for %i times in a row,' ' putting on %i second timeout.', connection, dead_count, timeout )
[ "def", "mark_dead", "(", "self", ",", "connection", ",", "now", "=", "None", ")", ":", "# allow inject for testing purposes", "now", "=", "now", "if", "now", "else", "time", ".", "time", "(", ")", "try", ":", "self", ".", "connections", ".", "remove", "(...
Mark the connection as dead (failed). Remove it from the live pool and put it on a timeout. :arg connection: the failed instance
[ "Mark", "the", "connection", "as", "dead", "(", "failed", ")", ".", "Remove", "it", "from", "the", "live", "pool", "and", "put", "it", "on", "a", "timeout", "." ]
train
https://github.com/humangeo/rawes/blob/b860100cbb4115a1c884133c83eae448ded6b2d3/rawes/connection_pool.py#L121-L145
humangeo/rawes
rawes/connection_pool.py
ConnectionPool.resurrect
def resurrect(self, force=False):
    """
    Attempt to move one (not all) connection from the dead queue back to
    the live pool, provided its retry timeout has expired.

    :arg force: resurrect a connection even if none is eligible (used
        when there are no live connections left)
    """
    if self.dead.empty():
        # nothing to resurrect
        return
    try:
        timeout, connection = self.dead.get(block=False)
    except Empty:
        # another thread drained the queue between the check and the get
        return
    if not force and timeout > time.time():
        # still on timeout - put it back and give up
        self.dead.put((timeout, connection))
        return
    # either forced, or the timeout has expired: return to the live pool
    self.connections.append(connection)
    logger.info('Resurrecting connection %r (force=%s).', connection, force)
python
def resurrect(self, force=False): """ Attempt to resurrect a connection from the dead pool. It will try to locate one (not all) eligible (it's timeout is over) connection to return to th live pool. :arg force: resurrect a connection even if there is none eligible (used when we have no live connections) """ # no dead connections if self.dead.empty(): return try: # retrieve a connection to check timeout, connection = self.dead.get(block=False) except Empty: # other thread has been faster and the queue is now empty return if not force and timeout > time.time(): # return it back if not eligible and not forced self.dead.put((timeout, connection)) return # either we were forced or the connection is elligible to be retried self.connections.append(connection) logger.info('Resurrecting connection %r (force=%s).', connection, force)
[ "def", "resurrect", "(", "self", ",", "force", "=", "False", ")", ":", "# no dead connections", "if", "self", ".", "dead", ".", "empty", "(", ")", ":", "return", "try", ":", "# retrieve a connection to check", "timeout", ",", "connection", "=", "self", ".", ...
Attempt to resurrect a connection from the dead pool. It will try to locate one (not all) eligible (it's timeout is over) connection to return to th live pool. :arg force: resurrect a connection even if there is none eligible (used when we have no live connections)
[ "Attempt", "to", "resurrect", "a", "connection", "from", "the", "dead", "pool", ".", "It", "will", "try", "to", "locate", "one", "(", "not", "all", ")", "eligible", "(", "it", "s", "timeout", "is", "over", ")", "connection", "to", "return", "to", "th",...
train
https://github.com/humangeo/rawes/blob/b860100cbb4115a1c884133c83eae448ded6b2d3/rawes/connection_pool.py#L160-L188
humangeo/rawes
rawes/connection_pool.py
ConnectionPool.get_connection
def get_connection(self):
    """
    Return a connection from the pool using the `ConnectionSelector`
    instance.

    Eligible dead connections are resurrected first; if no live connection
    remains, one is resurrected by force so the selector always has a
    candidate list to choose from.

    Returns a connection instance.
    """
    self.resurrect()
    if not self.connections:
        # no live nodes at all - bring one back regardless of its timeout
        self.resurrect(True)
    return self.selector.select(self.connections)
python
def get_connection(self): """ Return a connection from the pool using the `ConnectionSelector` instance. It tries to resurrect eligible connections, forces a resurrection when no connections are availible and passes the list of live connections to the selector instance to choose from. Returns a connection instance and it's current fail count. """ self.resurrect() # no live nodes, resurrect one by force if not self.connections: self.resurrect(True) connection = self.selector.select(self.connections) return connection
[ "def", "get_connection", "(", "self", ")", ":", "self", ".", "resurrect", "(", ")", "# no live nodes, resurrect one by force", "if", "not", "self", ".", "connections", ":", "self", ".", "resurrect", "(", "True", ")", "connection", "=", "self", ".", "selector",...
Return a connection from the pool using the `ConnectionSelector` instance. It tries to resurrect eligible connections, forces a resurrection when no connections are availible and passes the list of live connections to the selector instance to choose from. Returns a connection instance and it's current fail count.
[ "Return", "a", "connection", "from", "the", "pool", "using", "the", "ConnectionSelector", "instance", "." ]
train
https://github.com/humangeo/rawes/blob/b860100cbb4115a1c884133c83eae448ded6b2d3/rawes/connection_pool.py#L190-L208
demianbrecht/flask-canvas
flask_canvas.py
_canvas_route
def _canvas_route(self, *args, **kwargs):
    """
    Decorator for a Facebook canvas route.

    Wraps ``self.route(*args, **kwargs)``: the view function is only
    invoked after the ``signed_request`` POSTed by Facebook has been
    decoded and verified. When the view function declares a parameter
    named ``_ARG_KEY``, the decoded user is injected at that position;
    otherwise the view is called untouched.
    """
    def outer(view_fn):
        # NOTE(review): every wrapped view registers under the function
        # name 'inner'; with more than one canvas route Flask's default
        # endpoint naming may collide - confirm, and consider
        # functools.wraps(view_fn) on a fix.
        @self.route(*args, **kwargs)
        def inner(*args, **kwargs):
            fn_args = getargspec(view_fn)
            try:
                idx = fn_args.args.index(_ARG_KEY)
            except ValueError:
                idx = -1

            if idx > -1:
                # Facebook redirects back with ?error=... when the user
                # declines authorization
                if 'error' in flask_request.args:
                    return redirect('%s?error=%s' % (
                        self.config.get('CANVAS_ERROR_URI', '/'),
                        flask_request.args.get('error')))

                if 'signed_request' not in flask_request.form:
                    self.logger.error('signed_request not in request.form')
                    abort(403)

                try:
                    _, decoded_data = _decode_signed_user(
                        *flask_request.form['signed_request'].split('.'))
                except ValueError as e:
                    # str(e), not e.message: BaseException.message was
                    # removed in Python 3 and deprecated since 2.6
                    self.logger.error(str(e))
                    abort(403)

                if 'oauth_token' not in decoded_data:
                    # use self.logger/self.config for consistency with the
                    # rest of this function (the original mixed in the
                    # module-global `app`, which is the same application)
                    self.logger.info('unauthorized user, redirecting')
                    return _authorize()

                user = User(**decoded_data)

                if not self.config.get('CANVAS_SKIP_AUTH_CHECK', False) \
                        and not user.has_permissions():
                    self.logger.info(
                        'user does not have the required permission set.')
                    return _authorize()

                self.logger.info('all required permissions have been granted')
                # NOTE(review): args[:idx - 1] looks like an off-by-one
                # (args[:idx] would insert the user at the declared
                # position) - confirm against real canvas views before
                # changing; preserved as-is here.
                args = args[:idx - 1] + (user,) + args[idx:]

            return view_fn(*args, **kwargs)
        return inner
    return outer
python
def _canvas_route(self, *args, **kwargs): """ Decorator for canvas route """ def outer(view_fn): @self.route(*args, **kwargs) def inner(*args, **kwargs): fn_args = getargspec(view_fn) try: idx = fn_args.args.index(_ARG_KEY) except ValueError: idx = -1 if idx > -1: if 'error' in flask_request.args: return redirect('%s?error=%s' % ( self.config.get('CANVAS_ERROR_URI', '/'), flask_request.args.get('error'))) if 'signed_request' not in flask_request.form: self.logger.error('signed_request not in request.form') abort(403) try: _, decoded_data = _decode_signed_user( *flask_request.form['signed_request'].split('.')) except ValueError as e: self.logger.error(e.message) abort(403) if 'oauth_token' not in decoded_data: app.logger.info('unauthorized user, redirecting') return _authorize() user = User(**decoded_data) if not app.config.get('CANVAS_SKIP_AUTH_CHECK', False) \ and not user.has_permissions(): self.logger.info( 'user does not have the required permission set.') return _authorize() self.logger.info('all required permissions have been granted') args = args[:idx - 1] + (user,) + args[idx:] return view_fn(*args, **kwargs) return inner return outer
[ "def", "_canvas_route", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "outer", "(", "view_fn", ")", ":", "@", "self", ".", "route", "(", "*", "args", ",", "*", "*", "kwargs", ")", "def", "inner", "(", "*", "args", ","...
Decorator for canvas route
[ "Decorator", "for", "canvas", "route" ]
train
https://github.com/demianbrecht/flask-canvas/blob/07aa310c43f9386598cdfd5b163f94efa7808c85/flask_canvas.py#L56-L102
demianbrecht/flask-canvas
flask_canvas.py
_decode_signed_user
def _decode_signed_user(encoded_sig, encoded_data):
    """
    Decodes and verifies the ``POST``ed signed data.

    :param encoded_sig: base64url-encoded HMAC-SHA256 signature
    :param encoded_data: base64url-encoded JSON payload
    :return: tuple of (decoded signature bytes, decoded payload dict)
    :raises ValueError: if the signature does not match the payload
    """
    decoded_sig = _decode(encoded_sig)
    decoded_data = loads(_decode(encoded_data))

    expected_sig = hmac.new(app.config['CANVAS_CLIENT_SECRET'],
                            encoded_data, sha256).digest()
    # Constant-time comparison: '!=' on a signature from untrusted input
    # leaks timing information usable to forge signatures byte by byte.
    if not hmac.compare_digest(decoded_sig, expected_sig):
        raise ValueError("sig doesn't match hash")
    return decoded_sig, decoded_data
python
def _decode_signed_user(encoded_sig, encoded_data): """ Decodes the ``POST``ed signed data """ decoded_sig = _decode(encoded_sig) decoded_data = loads(_decode(encoded_data)) if decoded_sig != hmac.new(app.config['CANVAS_CLIENT_SECRET'], encoded_data, sha256).digest(): raise ValueError("sig doesn't match hash") return decoded_sig, decoded_data
[ "def", "_decode_signed_user", "(", "encoded_sig", ",", "encoded_data", ")", ":", "decoded_sig", "=", "_decode", "(", "encoded_sig", ")", "decoded_data", "=", "loads", "(", "_decode", "(", "encoded_data", ")", ")", "if", "decoded_sig", "!=", "hmac", ".", "new",...
Decodes the ``POST``ed signed data
[ "Decodes", "the", "POST", "ed", "signed", "data" ]
train
https://github.com/demianbrecht/flask-canvas/blob/07aa310c43f9386598cdfd5b163f94efa7808c85/flask_canvas.py#L133-L143
demianbrecht/flask-canvas
flask_canvas.py
User.request
def request(self, path, data=None, method='GET'):
    """
    Convenience Facebook Graph API request helper.

    Builds the graph URL with the user's OAuth token appended, issues the
    request and decodes the JSON response.

    :param path: graph resource path, e.g. ``/me``
    :param data: optional request body
    :param method: HTTP method to use
    :return: the decoded JSON response
    """
    url = '%s%s?access_token=%s' % (
        'https://graph.facebook.com', path, self['oauth_token'])
    graph_request = Request(url, data=data)
    # urllib's Request picks the verb via get_method(); override it so
    # methods other than GET/POST can be issued
    graph_request.get_method = lambda: method
    return loads(urlopen(graph_request).read())
python
def request(self, path, data=None, method='GET'): """ Convenience Facebook request function. Utility function to request resources via the graph API, with the format expected by Facebook. """ url = '%s%s?access_token=%s' % ( 'https://graph.facebook.com', path, self['oauth_token']) req = Request(url, data=data) req.get_method = lambda: method return loads(urlopen(req).read())
[ "def", "request", "(", "self", ",", "path", ",", "data", "=", "None", ",", "method", "=", "'GET'", ")", ":", "url", "=", "'%s%s?access_token=%s'", "%", "(", "'https://graph.facebook.com'", ",", "path", ",", "self", "[", "'oauth_token'", "]", ")", "req", ...
Convenience Facebook request function. Utility function to request resources via the graph API, with the format expected by Facebook.
[ "Convenience", "Facebook", "request", "function", "." ]
train
https://github.com/demianbrecht/flask-canvas/blob/07aa310c43f9386598cdfd5b163f94efa7808c85/flask_canvas.py#L29-L43
demianbrecht/flask-canvas
flask_canvas.py
User.has_permissions
def has_permissions(self):
    """
    Check the current user's permission set.

    Compares the permissions Facebook reports for the current user against
    every permission the application requests via the ``CANVAS_SCOPE``
    config value.
    """
    granted = self.request('/me/permissions')['data'][0].keys()
    required = app.config['CANVAS_SCOPE'].split(',')
    return all(permission in granted for permission in required)
python
def has_permissions(self): """ Check current user permission set Checks the current user permission set against the one being requested by the application. """ perms = self.request('/me/permissions')['data'][0].keys() return all(k in perms for k in app.config[ 'CANVAS_SCOPE'].split(','))
[ "def", "has_permissions", "(", "self", ")", ":", "perms", "=", "self", ".", "request", "(", "'/me/permissions'", ")", "[", "'data'", "]", "[", "0", "]", ".", "keys", "(", ")", "return", "all", "(", "k", "in", "perms", "for", "k", "in", "app", ".", ...
Check current user permission set Checks the current user permission set against the one being requested by the application.
[ "Check", "current", "user", "permission", "set" ]
train
https://github.com/demianbrecht/flask-canvas/blob/07aa310c43f9386598cdfd5b163f94efa7808c85/flask_canvas.py#L45-L53
mk-fg/feedjack
feedjack/filters.py
same_guid
def same_guid(post, parameter=DEFAULT_SIMILARITY_TIMESPAN):
    '''Skip posts with exactly same GUID.
        Parameter: comparison timespan, seconds (int, 0 = inf, default: {0}).'''
    from feedjack.models import Post
    # Parameter may arrive as a string from filter configuration
    if isinstance(parameter, types.StringTypes):
        parameter = int(parameter.strip())
    candidates = Post.objects.filtered(for_display=False)\
        .exclude(id=post.id).filter(guid=post.guid)
    if parameter:
        # 0 means "compare against all history"
        cutoff = timezone.now() - timedelta(seconds=parameter)
        candidates = candidates.filter(date_updated__gt=cutoff)
    return not candidates.exists()
python
def same_guid(post, parameter=DEFAULT_SIMILARITY_TIMESPAN): '''Skip posts with exactly same GUID. Parameter: comparison timespan, seconds (int, 0 = inf, default: {0}).''' from feedjack.models import Post if isinstance(parameter, types.StringTypes): parameter = int(parameter.strip()) similar = Post.objects.filtered(for_display=False)\ .exclude(id=post.id).filter(guid=post.guid) if parameter: similar = similar.filter(date_updated__gt=timezone.now() - timedelta(seconds=parameter)) return not bool(similar.exists())
[ "def", "same_guid", "(", "post", ",", "parameter", "=", "DEFAULT_SIMILARITY_TIMESPAN", ")", ":", "from", "feedjack", ".", "models", "import", "Post", "if", "isinstance", "(", "parameter", ",", "types", ".", "StringTypes", ")", ":", "parameter", "=", "int", "...
Skip posts with exactly same GUID. Parameter: comparison timespan, seconds (int, 0 = inf, default: {0}).
[ "Skip", "posts", "with", "exactly", "same", "GUID", ".", "Parameter", ":", "comparison", "timespan", "seconds", "(", "int", "0", "=", "inf", "default", ":", "{", "0", "}", ")", "." ]
train
https://github.com/mk-fg/feedjack/blob/3fe65c0f66dc2cfdf45834aaa7235ec9f81b3ca3/feedjack/filters.py#L43-L52
mk-fg/feedjack
feedjack/filters.py
similar_title
def similar_title(post, parameter=None):
    '''Skip posts with fuzzy-matched (threshold = levenshtein distance / length) title.
        Parameters (comma-delimited):
            minimal threshold, at which values are considired similar
                (float, 0 < x < 1, default: {0});
            comparison timespan, seconds (int, 0 = inf, default: {1}).'''
    from feedjack.models import Post
    threshold, timespan = DEFAULT_SIMILARITY_THRESHOLD, DEFAULT_SIMILARITY_TIMESPAN
    if parameter:
        # "threshold" or "threshold,timespan"
        parts = [chunk.strip() for chunk in parameter.split(',', 1)]
        threshold = float(parts[0])
        if len(parts) == 2:
            timespan = int(parts[1])
    similar = Post.objects.filtered(for_display=False)\
        .exclude(id=post.id).similar(threshold, title=post.title)
    if timespan:
        # 0 means "compare against all history"
        cutoff = timezone.now() - timedelta(seconds=timespan)
        similar = similar.filter(date_updated__gt=cutoff)
    return not similar.exists()
python
def similar_title(post, parameter=None): '''Skip posts with fuzzy-matched (threshold = levenshtein distance / length) title. Parameters (comma-delimited): minimal threshold, at which values are considired similar (float, 0 < x < 1, default: {0}); comparison timespan, seconds (int, 0 = inf, default: {1}).''' from feedjack.models import Post threshold, timespan = DEFAULT_SIMILARITY_THRESHOLD, DEFAULT_SIMILARITY_TIMESPAN if parameter: parameter = map(op.methodcaller('strip'), parameter.split(',', 1)) threshold = parameter.pop() try: threshold, timespan = parameter.pop(), threshold except IndexError: pass threshold, timespan = float(threshold), int(timespan) similar = Post.objects.filtered(for_display=False)\ .exclude(id=post.id).similar(threshold, title=post.title) if timespan: similar = similar.filter(date_updated__gt=timezone.now() - timedelta(seconds=timespan)) return not bool(similar.exists())
[ "def", "similar_title", "(", "post", ",", "parameter", "=", "None", ")", ":", "from", "feedjack", ".", "models", "import", "Post", "threshold", ",", "timespan", "=", "DEFAULT_SIMILARITY_THRESHOLD", ",", "DEFAULT_SIMILARITY_TIMESPAN", "if", "parameter", ":", "param...
Skip posts with fuzzy-matched (threshold = levenshtein distance / length) title. Parameters (comma-delimited): minimal threshold, at which values are considired similar (float, 0 < x < 1, default: {0}); comparison timespan, seconds (int, 0 = inf, default: {1}).
[ "Skip", "posts", "with", "fuzzy", "-", "matched", "(", "threshold", "=", "levenshtein", "distance", "/", "length", ")", "title", ".", "Parameters", "(", "comma", "-", "delimited", ")", ":", "minimal", "threshold", "at", "which", "values", "are", "considired"...
train
https://github.com/mk-fg/feedjack/blob/3fe65c0f66dc2cfdf45834aaa7235ec9f81b3ca3/feedjack/filters.py#L57-L74
mk-fg/feedjack
feedjack/filters.py
pick_enclosure_link
def pick_enclosure_link(post, parameter=''):
    '''Override URL of the Post to point to url of the first enclosure with href
            attribute non-empty and type matching specified regexp parameter (empty=any).
        Missing "type" attribute for enclosure will be matched as an empty string.
        If none of the enclosures match, link won't be updated.'''
    for enclosure in (post.enclosures or list()):
        link = enclosure.get('href')
        if not link:
            # enclosure without a usable URL - skip
            continue
        mime_type = enclosure.get('type', '')
        if parameter and not re.search(parameter, mime_type):
            # type does not match the requested regexp - skip
            continue
        return dict(link=link)
python
def pick_enclosure_link(post, parameter=''): '''Override URL of the Post to point to url of the first enclosure with href attribute non-empty and type matching specified regexp parameter (empty=any). Missing "type" attribute for enclosure will be matched as an empty string. If none of the enclosures match, link won't be updated.''' for e in (post.enclosures or list()): href = e.get('href') if not href: continue if parameter and not re.search(parameter, e.get('type', '')): continue return dict(link=href)
[ "def", "pick_enclosure_link", "(", "post", ",", "parameter", "=", "''", ")", ":", "for", "e", "in", "(", "post", ".", "enclosures", "or", "list", "(", ")", ")", ":", "href", "=", "e", ".", "get", "(", "'href'", ")", "if", "not", "href", ":", "con...
Override URL of the Post to point to url of the first enclosure with href attribute non-empty and type matching specified regexp parameter (empty=any). Missing "type" attribute for enclosure will be matched as an empty string. If none of the enclosures match, link won't be updated.
[ "Override", "URL", "of", "the", "Post", "to", "point", "to", "url", "of", "the", "first", "enclosure", "with", "href", "attribute", "non", "-", "empty", "and", "type", "matching", "specified", "regexp", "parameter", "(", "empty", "=", "any", ")", ".", "M...
train
https://github.com/mk-fg/feedjack/blob/3fe65c0f66dc2cfdf45834aaa7235ec9f81b3ca3/feedjack/filters.py#L83-L92
xolox/python-verboselogs
verboselogs/__init__.py
add_log_level
def add_log_level(value, name):
    """
    Register a new log level with the :mod:`logging` module.

    :param value: The log level's number (an integer).
    :param name: The name for the log level (a string).
    """
    # Make the numeric level render by name in formatted records ...
    logging.addLevelName(value, name)
    # ... and expose it as logging.<NAME> for callers.
    setattr(logging, name, value)
python
def add_log_level(value, name): """ Add a new log level to the :mod:`logging` module. :param value: The log level's number (an integer). :param name: The name for the log level (a string). """ logging.addLevelName(value, name) setattr(logging, name, value)
[ "def", "add_log_level", "(", "value", ",", "name", ")", ":", "logging", ".", "addLevelName", "(", "value", ",", "name", ")", "setattr", "(", "logging", ",", "name", ",", "value", ")" ]
Add a new log level to the :mod:`logging` module. :param value: The log level's number (an integer). :param name: The name for the log level (a string).
[ "Add", "a", "new", "log", "level", "to", "the", ":", "mod", ":", "logging", "module", "." ]
train
https://github.com/xolox/python-verboselogs/blob/3cebc69e03588bb6c3726c38c324b12732989292/verboselogs/__init__.py#L92-L100
xolox/python-verboselogs
verboselogs/__init__.py
VerboseLogger.notice
def notice(self, msg, *args, **kw):
    """Log a message with level :data:`NOTICE`. The arguments are
    interpreted as for :func:`logging.debug()`."""
    if not self.isEnabledFor(NOTICE):
        return
    self._log(NOTICE, msg, args, **kw)
python
def notice(self, msg, *args, **kw): """Log a message with level :data:`NOTICE`. The arguments are interpreted as for :func:`logging.debug()`.""" if self.isEnabledFor(NOTICE): self._log(NOTICE, msg, args, **kw)
[ "def", "notice", "(", "self", ",", "msg", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "if", "self", ".", "isEnabledFor", "(", "NOTICE", ")", ":", "self", ".", "_log", "(", "NOTICE", ",", "msg", ",", "args", ",", "*", "*", "kw", ")" ]
Log a message with level :data:`NOTICE`. The arguments are interpreted as for :func:`logging.debug()`.
[ "Log", "a", "message", "with", "level", ":", "data", ":", "NOTICE", ".", "The", "arguments", "are", "interpreted", "as", "for", ":", "func", ":", "logging", ".", "debug", "()", "." ]
train
https://github.com/xolox/python-verboselogs/blob/3cebc69e03588bb6c3726c38c324b12732989292/verboselogs/__init__.py#L148-L151
xolox/python-verboselogs
verboselogs/__init__.py
VerboseLogger.spam
def spam(self, msg, *args, **kw):
    """Log a message with level :data:`SPAM`. The arguments are
    interpreted as for :func:`logging.debug()`."""
    if not self.isEnabledFor(SPAM):
        return
    self._log(SPAM, msg, args, **kw)
python
def spam(self, msg, *args, **kw): """Log a message with level :data:`SPAM`. The arguments are interpreted as for :func:`logging.debug()`.""" if self.isEnabledFor(SPAM): self._log(SPAM, msg, args, **kw)
[ "def", "spam", "(", "self", ",", "msg", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "if", "self", ".", "isEnabledFor", "(", "SPAM", ")", ":", "self", ".", "_log", "(", "SPAM", ",", "msg", ",", "args", ",", "*", "*", "kw", ")" ]
Log a message with level :data:`SPAM`. The arguments are interpreted as for :func:`logging.debug()`.
[ "Log", "a", "message", "with", "level", ":", "data", ":", "SPAM", ".", "The", "arguments", "are", "interpreted", "as", "for", ":", "func", ":", "logging", ".", "debug", "()", "." ]
train
https://github.com/xolox/python-verboselogs/blob/3cebc69e03588bb6c3726c38c324b12732989292/verboselogs/__init__.py#L153-L156
xolox/python-verboselogs
verboselogs/__init__.py
VerboseLogger.success
def success(self, msg, *args, **kw):
    """Log a message with level :data:`SUCCESS`. The arguments are
    interpreted as for :func:`logging.debug()`."""
    if not self.isEnabledFor(SUCCESS):
        return
    self._log(SUCCESS, msg, args, **kw)
python
def success(self, msg, *args, **kw): """Log a message with level :data:`SUCCESS`. The arguments are interpreted as for :func:`logging.debug()`.""" if self.isEnabledFor(SUCCESS): self._log(SUCCESS, msg, args, **kw)
[ "def", "success", "(", "self", ",", "msg", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "if", "self", ".", "isEnabledFor", "(", "SUCCESS", ")", ":", "self", ".", "_log", "(", "SUCCESS", ",", "msg", ",", "args", ",", "*", "*", "kw", ")" ]
Log a message with level :data:`SUCCESS`. The arguments are interpreted as for :func:`logging.debug()`.
[ "Log", "a", "message", "with", "level", ":", "data", ":", "SUCCESS", ".", "The", "arguments", "are", "interpreted", "as", "for", ":", "func", ":", "logging", ".", "debug", "()", "." ]
train
https://github.com/xolox/python-verboselogs/blob/3cebc69e03588bb6c3726c38c324b12732989292/verboselogs/__init__.py#L158-L161
xolox/python-verboselogs
verboselogs/__init__.py
VerboseLogger.verbose
def verbose(self, msg, *args, **kw):
    """Log a message with level :data:`VERBOSE`. The arguments are
    interpreted as for :func:`logging.debug()`."""
    if not self.isEnabledFor(VERBOSE):
        return
    self._log(VERBOSE, msg, args, **kw)
python
def verbose(self, msg, *args, **kw): """Log a message with level :data:`VERBOSE`. The arguments are interpreted as for :func:`logging.debug()`.""" if self.isEnabledFor(VERBOSE): self._log(VERBOSE, msg, args, **kw)
[ "def", "verbose", "(", "self", ",", "msg", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "if", "self", ".", "isEnabledFor", "(", "VERBOSE", ")", ":", "self", ".", "_log", "(", "VERBOSE", ",", "msg", ",", "args", ",", "*", "*", "kw", ")" ]
Log a message with level :data:`VERBOSE`. The arguments are interpreted as for :func:`logging.debug()`.
[ "Log", "a", "message", "with", "level", ":", "data", ":", "VERBOSE", ".", "The", "arguments", "are", "interpreted", "as", "for", ":", "func", ":", "logging", ".", "debug", "()", "." ]
train
https://github.com/xolox/python-verboselogs/blob/3cebc69e03588bb6c3726c38c324b12732989292/verboselogs/__init__.py#L163-L166
aiidateam/aiida-ase
aiida_ase/calculations/ase.py
get_calculator_impstr
def get_calculator_impstr(calculator_name): """ Returns the import string for the calculator """ if calculator_name.lower() == "gpaw" or calculator_name is None: return "from gpaw import GPAW as custom_calculator" elif calculator_name.lower() == "espresso": return "from espresso import espresso as custom_calculator" else: possibilities = {"abinit":"abinit.Abinit", "aims":"aims.Aims", "ase_qmmm_manyqm":"AseQmmmManyqm", "castep":"Castep", "dacapo":"Dacapo", "dftb":"Dftb", "eam":"EAM", "elk":"ELK", "emt":"EMT", "exciting":"Exciting", "fleur":"FLEUR", "gaussian":"Gaussian", "gromacs":"Gromacs", "mopac":"Mopac", "morse":"MorsePotential", "nwchem":"NWChem", 'siesta':"Siesta", "tip3p":"TIP3P", "turbomole":"Turbomole", "vasp":"Vasp", } current_val = possibilities.get(calculator_name.lower()) package, class_name = (calculator_name,current_val) if current_val else calculator_name.rsplit('.',1) return "from ase.calculators.{} import {} as custom_calculator".format(package, class_name)
python
def get_calculator_impstr(calculator_name): """ Returns the import string for the calculator """ if calculator_name.lower() == "gpaw" or calculator_name is None: return "from gpaw import GPAW as custom_calculator" elif calculator_name.lower() == "espresso": return "from espresso import espresso as custom_calculator" else: possibilities = {"abinit":"abinit.Abinit", "aims":"aims.Aims", "ase_qmmm_manyqm":"AseQmmmManyqm", "castep":"Castep", "dacapo":"Dacapo", "dftb":"Dftb", "eam":"EAM", "elk":"ELK", "emt":"EMT", "exciting":"Exciting", "fleur":"FLEUR", "gaussian":"Gaussian", "gromacs":"Gromacs", "mopac":"Mopac", "morse":"MorsePotential", "nwchem":"NWChem", 'siesta':"Siesta", "tip3p":"TIP3P", "turbomole":"Turbomole", "vasp":"Vasp", } current_val = possibilities.get(calculator_name.lower()) package, class_name = (calculator_name,current_val) if current_val else calculator_name.rsplit('.',1) return "from ase.calculators.{} import {} as custom_calculator".format(package, class_name)
[ "def", "get_calculator_impstr", "(", "calculator_name", ")", ":", "if", "calculator_name", ".", "lower", "(", ")", "==", "\"gpaw\"", "or", "calculator_name", "is", "None", ":", "return", "\"from gpaw import GPAW as custom_calculator\"", "elif", "calculator_name", ".", ...
Returns the import string for the calculator
[ "Returns", "the", "import", "string", "for", "the", "calculator" ]
train
https://github.com/aiidateam/aiida-ase/blob/688a01fa872717ee3babdb1f10405b306371cf44/aiida_ase/calculations/ase.py#L367-L402
aiidateam/aiida-ase
aiida_ase/calculations/ase.py
get_optimizer_impstr
def get_optimizer_impstr(optimizer_name): """ Returns the import string for the optimizer """ possibilities = {"bfgs":"BFGS", "bfgslinesearch":"BFGSLineSearch", "fire":"FIRE", "goodoldquasinewton":"GoodOldQuasiNewton", "hesslbfgs":"HessLBFGS", "lbfgs":"LBFGS", "lbfgslinesearch":"LBFGSLineSearch", "linelbfgs":"LineLBFGS", "mdmin":"MDMin", "ndpoly":"NDPoly", "quasinewton":"QuasiNewton", "scipyfmin":"SciPyFmin", "scipyfminbfgs":"SciPyFminBFGS", "scipyfmincg":"SciPyFminCG", "scipyfminpowell":"SciPyFminPowell", "scipygradientlessoptimizer":"SciPyGradientlessOptimizer", } current_val = possibilities.get(optimizer_name.lower()) if current_val: return "from ase.optimize import {} as custom_optimizer".format(current_val) else: package,current_val = optimizer_name.rsplit('.',1) return "from ase.optimize.{} import {} as custom_optimizer".format(package,current_val)
python
def get_optimizer_impstr(optimizer_name): """ Returns the import string for the optimizer """ possibilities = {"bfgs":"BFGS", "bfgslinesearch":"BFGSLineSearch", "fire":"FIRE", "goodoldquasinewton":"GoodOldQuasiNewton", "hesslbfgs":"HessLBFGS", "lbfgs":"LBFGS", "lbfgslinesearch":"LBFGSLineSearch", "linelbfgs":"LineLBFGS", "mdmin":"MDMin", "ndpoly":"NDPoly", "quasinewton":"QuasiNewton", "scipyfmin":"SciPyFmin", "scipyfminbfgs":"SciPyFminBFGS", "scipyfmincg":"SciPyFminCG", "scipyfminpowell":"SciPyFminPowell", "scipygradientlessoptimizer":"SciPyGradientlessOptimizer", } current_val = possibilities.get(optimizer_name.lower()) if current_val: return "from ase.optimize import {} as custom_optimizer".format(current_val) else: package,current_val = optimizer_name.rsplit('.',1) return "from ase.optimize.{} import {} as custom_optimizer".format(package,current_val)
[ "def", "get_optimizer_impstr", "(", "optimizer_name", ")", ":", "possibilities", "=", "{", "\"bfgs\"", ":", "\"BFGS\"", ",", "\"bfgslinesearch\"", ":", "\"BFGSLineSearch\"", ",", "\"fire\"", ":", "\"FIRE\"", ",", "\"goodoldquasinewton\"", ":", "\"GoodOldQuasiNewton\"", ...
Returns the import string for the optimizer
[ "Returns", "the", "import", "string", "for", "the", "optimizer" ]
train
https://github.com/aiidateam/aiida-ase/blob/688a01fa872717ee3babdb1f10405b306371cf44/aiida_ase/calculations/ase.py#L404-L432
aiidateam/aiida-ase
aiida_ase/calculations/ase.py
convert_the_getters
def convert_the_getters(getters): """ A function used to prepare the arguments of calculator and atoms getter methods """ return_list = [] for getter in getters: if isinstance(getter,basestring): out_args = "" method_name = getter else: method_name, a = getter out_args = convert_the_args(a) return_list.append( (method_name, out_args) ) return return_list
python
def convert_the_getters(getters): """ A function used to prepare the arguments of calculator and atoms getter methods """ return_list = [] for getter in getters: if isinstance(getter,basestring): out_args = "" method_name = getter else: method_name, a = getter out_args = convert_the_args(a) return_list.append( (method_name, out_args) ) return return_list
[ "def", "convert_the_getters", "(", "getters", ")", ":", "return_list", "=", "[", "]", "for", "getter", "in", "getters", ":", "if", "isinstance", "(", "getter", ",", "basestring", ")", ":", "out_args", "=", "\"\"", "method_name", "=", "getter", "else", ":",...
A function used to prepare the arguments of calculator and atoms getter methods
[ "A", "function", "used", "to", "prepare", "the", "arguments", "of", "calculator", "and", "atoms", "getter", "methods" ]
train
https://github.com/aiidateam/aiida-ase/blob/688a01fa872717ee3babdb1f10405b306371cf44/aiida_ase/calculations/ase.py#L434-L451
aiidateam/aiida-ase
aiida_ase/calculations/ase.py
convert_the_args
def convert_the_args(raw_args): """ Function used to convert the arguments of methods """ if not raw_args: return "" if isinstance(raw_args,dict): out_args = ", ".join([ "{}={}".format(k,v) for k,v in raw_args.iteritems() ]) elif isinstance(raw_args,(list,tuple)): new_list = [] for x in raw_args: if isinstance(x,basestring): new_list.append(x) elif isinstance(x,dict): new_list.append( ", ".join([ "{}={}".format(k,v) for k,v in x.iteritems() ]) ) else: raise ValueError("Error preparing the getters") out_args = ", ".join(new_list) else: raise ValueError("Couldn't recognize list of getters") return out_args
python
def convert_the_args(raw_args): """ Function used to convert the arguments of methods """ if not raw_args: return "" if isinstance(raw_args,dict): out_args = ", ".join([ "{}={}".format(k,v) for k,v in raw_args.iteritems() ]) elif isinstance(raw_args,(list,tuple)): new_list = [] for x in raw_args: if isinstance(x,basestring): new_list.append(x) elif isinstance(x,dict): new_list.append( ", ".join([ "{}={}".format(k,v) for k,v in x.iteritems() ]) ) else: raise ValueError("Error preparing the getters") out_args = ", ".join(new_list) else: raise ValueError("Couldn't recognize list of getters") return out_args
[ "def", "convert_the_args", "(", "raw_args", ")", ":", "if", "not", "raw_args", ":", "return", "\"\"", "if", "isinstance", "(", "raw_args", ",", "dict", ")", ":", "out_args", "=", "\", \"", ".", "join", "(", "[", "\"{}={}\"", ".", "format", "(", "k", ",...
Function used to convert the arguments of methods
[ "Function", "used", "to", "convert", "the", "arguments", "of", "methods" ]
train
https://github.com/aiidateam/aiida-ase/blob/688a01fa872717ee3babdb1f10405b306371cf44/aiida_ase/calculations/ase.py#L453-L474
aiidateam/aiida-ase
aiida_ase/calculations/ase.py
AseCalculation._prepare_for_submission
def _prepare_for_submission(self,tempfolder, inputdict): """ This is the routine to be called when you want to create the input files and related stuff with a plugin. :param tempfolder: a aiida.common.folders.Folder subclass where the plugin should put all its files. :param inputdict: a dictionary with the input nodes, as they would be returned by get_inputdata_dict (without the Code!) """ try: code = inputdict.pop(self.get_linkname('code')) except KeyError: raise InputValidationError("No code specified for this " "calculation") try: parameters = inputdict.pop(self.get_linkname('parameters')) except KeyError: raise InputValidationError("No parameters specified for this " "calculation") if not isinstance(parameters, ParameterData): raise InputValidationError("parameters is not of type " "ParameterData") try: structure = inputdict.pop(self.get_linkname('structure')) except KeyError: raise InputValidationError("No structure specified for this " "calculation") if not isinstance(structure,StructureData): raise InputValidationError("structure node is not of type" "StructureData") try: settings = inputdict.pop(self.get_linkname('settings'),None) except KeyError: pass if settings is not None: if not isinstance(parameters, ParameterData): raise InputValidationError("parameters is not of type " "ParameterData") try: kpoints = inputdict.pop(self.get_linkname('kpoints'),None) except KeyError: pass if kpoints is not None: if not isinstance(kpoints, KpointsData): raise InputValidationError("kpoints is not of type KpointsData") ############################## # END OF INITIAL INPUT CHECK # ############################## # default atom getter: I will always retrieve the total energy at least default_atoms_getters = [ ["total_energy",""] ] # ================================ # save the structure in ase format atoms = structure.get_ase() atoms.write(tempfolder.get_abs_path(self._input_aseatoms)) # ================== prepare the arguments of functions ================ parameters_dict = 
parameters.get_dict() settings_dict = settings.get_dict() if settings is not None else {} # ==================== fix the args of the optimizer optimizer = parameters_dict.pop("optimizer",None) if optimizer is not None: # Validation if not isinstance(optimizer,dict): raise InputValidationError("optimizer key must contain a dictionary") # get the name of the optimizer optimizer_name = optimizer.pop("name",None) if optimizer_name is None: raise InputValidationError("Don't have access to the optimizer name") # prepare the arguments to be passed to the optimizer class optimizer_argsstr = "atoms, " + convert_the_args(optimizer.pop("args",[])) # prepare the arguments to be passed to optimizer.run() optimizer_runargsstr = convert_the_args(optimizer.pop("run_args",[])) # prepare the import string optimizer_import_string = get_optimizer_impstr(optimizer_name) # ================= determine the calculator name and its import ==== calculator = parameters_dict.pop("calculator",{}) calculator_import_string = get_calculator_impstr(calculator.pop("name",None)) # =================== prepare the arguments for the calculator call read_calc_args = calculator.pop("args",[]) #calc_args = calculator.pop("args",None) if read_calc_args is None: calc_argsstr = "" else: # transform a in "a" if a is a string (needed for formatting) calc_args = {} for k,v in read_calc_args.iteritems(): if isinstance(v, basestring): the_v = '"{}"'.format(v) else: the_v = v calc_args[k] = the_v def return_a_function(v): try: has_magic = "@function" in v.keys() except AttributeError: has_magic = False if has_magic: args_dict = {} for k2,v2 in v['args'].iteritems(): if isinstance(v2,basestring): the_v = '"{}"'.format(v2) else: the_v = v2 args_dict[k2] = the_v v2 = "{}({})".format(v['@function'], ", ".join(["{}={}".format(k_,v_) for k_,v_ in args_dict.iteritems()])) return v2 else: return v tmp_list = [ "{}={}".format(k,return_a_function(v)) for k,v in calc_args.iteritems() ] calc_argsstr = ", ".join( tmp_list ) # 
add kpoints if present if kpoints: #TODO: here only the mesh is supported # maybe kpoint lists are supported as well in ASE calculators try: mesh = kpoints.get_kpoints_mesh()[0] except AttributeError: raise InputValidationError("Coudn't find a mesh of kpoints" " in the KpointsData") calc_argsstr = ", ".join( [calc_argsstr] + ["kpts=({},{},{})".format( *mesh )] ) # =============== prepare the methods of atoms.get(), to save results atoms_getters = default_atoms_getters + convert_the_getters( parameters_dict.pop("atoms_getters",[]) ) # =============== prepare the methods of calculator.get(), to save results calculator_getters = convert_the_getters( parameters_dict.pop("calculator_getters",[]) ) # ===================== build the strings with the module imports all_imports = ["import ase", 'import ase.io', "import json", "import numpy", calculator_import_string] if optimizer is not None: all_imports.append(optimizer_import_string) try: if "PW" in calc_args['mode'].values(): all_imports.append("from gpaw import PW") except KeyError: pass extra_imports = parameters_dict.pop("extra_imports",[]) for i in extra_imports: if isinstance(i,basestring): all_imports.append("import {}".format(i)) elif isinstance(i,(list,tuple)): if not all( [isinstance(j,basestring) for j in i] ): raise ValueError("extra import must contain strings") if len(i)==2: all_imports.append("from {} import {}".format(*i)) elif len(i)==3: all_imports.append("from {} import {} as {}".format(*i)) else: raise ValueError("format for extra imports not recognized") else: raise ValueError("format for extra imports not recognized") if self.get_withmpi(): all_imports.append( "from ase.parallel import paropen" ) all_imports_string = "\n".join(all_imports) + "\n" # =================== prepare the python script ======================== input_txt = "" input_txt += get_file_header() input_txt += "# calculation pk: {}\n".format(self.pk) input_txt += "\n" input_txt += all_imports_string input_txt += "\n" pre_lines = 
parameters_dict.pop("pre_lines",None) if pre_lines is not None: if not isinstance(pre_lines,(list,tuple)): raise ValueError("Prelines must be a list of strings") if not all( [isinstance(_,basestring) for _ in pre_lines] ): raise ValueError("Prelines must be a list of strings") input_txt += "\n".join(pre_lines) + "\n\n" input_txt += "atoms = ase.io.read('{}')\n".format(self._input_aseatoms) input_txt += "\n" input_txt += "calculator = custom_calculator({})\n".format(calc_argsstr) input_txt += "atoms.set_calculator(calculator)\n" input_txt += "\n" if optimizer is not None: # here block the trajectory file name: trajectory = 'aiida.traj' input_txt += "optimizer = custom_optimizer({})\n".format(optimizer_argsstr) input_txt += "optimizer.run({})\n".format(optimizer_runargsstr) input_txt += "\n" # now dump / calculate the results input_txt += "results = {}\n" for getter,getter_args in atoms_getters: input_txt += "results['{}'] = atoms.get_{}({})\n".format(getter, getter, getter_args) input_txt += "\n" for getter,getter_args in calculator_getters: input_txt += "results['{}'] = calculator.get_{}({})\n".format(getter, getter, getter_args) input_txt += "\n" # Convert to lists input_txt += "for k,v in results.iteritems():\n" input_txt += " if isinstance(results[k],(numpy.matrix,numpy.ndarray)):\n" input_txt += " results[k] = results[k].tolist()\n" input_txt += "\n" post_lines = parameters_dict.pop("post_lines",None) if post_lines is not None: if not isinstance(post_lines,(list,tuple)): raise ValueError("Postlines must be a list of strings") if not all( [isinstance(_,basestring) for _ in post_lines] ): raise ValueError("Postlines must be a list of strings") input_txt += "\n".join(post_lines) + "\n\n" # Dump results to file right_open = "paropen" if self.get_withmpi() else "open" input_txt += "with {}('{}', 'w') as f:\n".format(right_open, self._OUTPUT_FILE_NAME) input_txt += " json.dump(results,f)" input_txt += "\n" # Dump trajectory if present if optimizer is not None: 
input_txt += "atoms.write('{}')\n".format(self._output_aseatoms) input_txt += "\n" # write all the input script to a file input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME) with open(input_filename,'w') as infile: infile.write(input_txt) # ============================ calcinfo ================================ # TODO: look at the qmmm infoL: it might be necessary to put # some singlefiles in the directory. # right now it has to be taken care in the pre_lines local_copy_list = [] remote_copy_list = [] additional_retrieve_list = settings_dict.pop("ADDITIONAL_RETRIEVE_LIST",[]) calcinfo = CalcInfo() calcinfo.uuid = self.uuid # Empty command line by default # calcinfo.cmdline_params = settings_dict.pop('CMDLINE', []) calcinfo.local_copy_list = local_copy_list calcinfo.remote_copy_list = remote_copy_list codeinfo = CodeInfo() codeinfo.cmdline_params = [self._INPUT_FILE_NAME] #calcinfo.stdin_name = self._INPUT_FILE_NAME codeinfo.stdout_name = self._TXT_OUTPUT_FILE_NAME codeinfo.code_uuid = code.uuid calcinfo.codes_info = [codeinfo] # Retrieve files calcinfo.retrieve_list = [] calcinfo.retrieve_list.append(self._OUTPUT_FILE_NAME) calcinfo.retrieve_list.append(self._output_aseatoms) calcinfo.retrieve_list += additional_retrieve_list # TODO: I should have two ways of running it: with gpaw-python in parallel # and executing python if in serial return calcinfo
python
def _prepare_for_submission(self,tempfolder, inputdict): """ This is the routine to be called when you want to create the input files and related stuff with a plugin. :param tempfolder: a aiida.common.folders.Folder subclass where the plugin should put all its files. :param inputdict: a dictionary with the input nodes, as they would be returned by get_inputdata_dict (without the Code!) """ try: code = inputdict.pop(self.get_linkname('code')) except KeyError: raise InputValidationError("No code specified for this " "calculation") try: parameters = inputdict.pop(self.get_linkname('parameters')) except KeyError: raise InputValidationError("No parameters specified for this " "calculation") if not isinstance(parameters, ParameterData): raise InputValidationError("parameters is not of type " "ParameterData") try: structure = inputdict.pop(self.get_linkname('structure')) except KeyError: raise InputValidationError("No structure specified for this " "calculation") if not isinstance(structure,StructureData): raise InputValidationError("structure node is not of type" "StructureData") try: settings = inputdict.pop(self.get_linkname('settings'),None) except KeyError: pass if settings is not None: if not isinstance(parameters, ParameterData): raise InputValidationError("parameters is not of type " "ParameterData") try: kpoints = inputdict.pop(self.get_linkname('kpoints'),None) except KeyError: pass if kpoints is not None: if not isinstance(kpoints, KpointsData): raise InputValidationError("kpoints is not of type KpointsData") ############################## # END OF INITIAL INPUT CHECK # ############################## # default atom getter: I will always retrieve the total energy at least default_atoms_getters = [ ["total_energy",""] ] # ================================ # save the structure in ase format atoms = structure.get_ase() atoms.write(tempfolder.get_abs_path(self._input_aseatoms)) # ================== prepare the arguments of functions ================ parameters_dict = 
parameters.get_dict() settings_dict = settings.get_dict() if settings is not None else {} # ==================== fix the args of the optimizer optimizer = parameters_dict.pop("optimizer",None) if optimizer is not None: # Validation if not isinstance(optimizer,dict): raise InputValidationError("optimizer key must contain a dictionary") # get the name of the optimizer optimizer_name = optimizer.pop("name",None) if optimizer_name is None: raise InputValidationError("Don't have access to the optimizer name") # prepare the arguments to be passed to the optimizer class optimizer_argsstr = "atoms, " + convert_the_args(optimizer.pop("args",[])) # prepare the arguments to be passed to optimizer.run() optimizer_runargsstr = convert_the_args(optimizer.pop("run_args",[])) # prepare the import string optimizer_import_string = get_optimizer_impstr(optimizer_name) # ================= determine the calculator name and its import ==== calculator = parameters_dict.pop("calculator",{}) calculator_import_string = get_calculator_impstr(calculator.pop("name",None)) # =================== prepare the arguments for the calculator call read_calc_args = calculator.pop("args",[]) #calc_args = calculator.pop("args",None) if read_calc_args is None: calc_argsstr = "" else: # transform a in "a" if a is a string (needed for formatting) calc_args = {} for k,v in read_calc_args.iteritems(): if isinstance(v, basestring): the_v = '"{}"'.format(v) else: the_v = v calc_args[k] = the_v def return_a_function(v): try: has_magic = "@function" in v.keys() except AttributeError: has_magic = False if has_magic: args_dict = {} for k2,v2 in v['args'].iteritems(): if isinstance(v2,basestring): the_v = '"{}"'.format(v2) else: the_v = v2 args_dict[k2] = the_v v2 = "{}({})".format(v['@function'], ", ".join(["{}={}".format(k_,v_) for k_,v_ in args_dict.iteritems()])) return v2 else: return v tmp_list = [ "{}={}".format(k,return_a_function(v)) for k,v in calc_args.iteritems() ] calc_argsstr = ", ".join( tmp_list ) # 
add kpoints if present if kpoints: #TODO: here only the mesh is supported # maybe kpoint lists are supported as well in ASE calculators try: mesh = kpoints.get_kpoints_mesh()[0] except AttributeError: raise InputValidationError("Coudn't find a mesh of kpoints" " in the KpointsData") calc_argsstr = ", ".join( [calc_argsstr] + ["kpts=({},{},{})".format( *mesh )] ) # =============== prepare the methods of atoms.get(), to save results atoms_getters = default_atoms_getters + convert_the_getters( parameters_dict.pop("atoms_getters",[]) ) # =============== prepare the methods of calculator.get(), to save results calculator_getters = convert_the_getters( parameters_dict.pop("calculator_getters",[]) ) # ===================== build the strings with the module imports all_imports = ["import ase", 'import ase.io', "import json", "import numpy", calculator_import_string] if optimizer is not None: all_imports.append(optimizer_import_string) try: if "PW" in calc_args['mode'].values(): all_imports.append("from gpaw import PW") except KeyError: pass extra_imports = parameters_dict.pop("extra_imports",[]) for i in extra_imports: if isinstance(i,basestring): all_imports.append("import {}".format(i)) elif isinstance(i,(list,tuple)): if not all( [isinstance(j,basestring) for j in i] ): raise ValueError("extra import must contain strings") if len(i)==2: all_imports.append("from {} import {}".format(*i)) elif len(i)==3: all_imports.append("from {} import {} as {}".format(*i)) else: raise ValueError("format for extra imports not recognized") else: raise ValueError("format for extra imports not recognized") if self.get_withmpi(): all_imports.append( "from ase.parallel import paropen" ) all_imports_string = "\n".join(all_imports) + "\n" # =================== prepare the python script ======================== input_txt = "" input_txt += get_file_header() input_txt += "# calculation pk: {}\n".format(self.pk) input_txt += "\n" input_txt += all_imports_string input_txt += "\n" pre_lines = 
parameters_dict.pop("pre_lines",None) if pre_lines is not None: if not isinstance(pre_lines,(list,tuple)): raise ValueError("Prelines must be a list of strings") if not all( [isinstance(_,basestring) for _ in pre_lines] ): raise ValueError("Prelines must be a list of strings") input_txt += "\n".join(pre_lines) + "\n\n" input_txt += "atoms = ase.io.read('{}')\n".format(self._input_aseatoms) input_txt += "\n" input_txt += "calculator = custom_calculator({})\n".format(calc_argsstr) input_txt += "atoms.set_calculator(calculator)\n" input_txt += "\n" if optimizer is not None: # here block the trajectory file name: trajectory = 'aiida.traj' input_txt += "optimizer = custom_optimizer({})\n".format(optimizer_argsstr) input_txt += "optimizer.run({})\n".format(optimizer_runargsstr) input_txt += "\n" # now dump / calculate the results input_txt += "results = {}\n" for getter,getter_args in atoms_getters: input_txt += "results['{}'] = atoms.get_{}({})\n".format(getter, getter, getter_args) input_txt += "\n" for getter,getter_args in calculator_getters: input_txt += "results['{}'] = calculator.get_{}({})\n".format(getter, getter, getter_args) input_txt += "\n" # Convert to lists input_txt += "for k,v in results.iteritems():\n" input_txt += " if isinstance(results[k],(numpy.matrix,numpy.ndarray)):\n" input_txt += " results[k] = results[k].tolist()\n" input_txt += "\n" post_lines = parameters_dict.pop("post_lines",None) if post_lines is not None: if not isinstance(post_lines,(list,tuple)): raise ValueError("Postlines must be a list of strings") if not all( [isinstance(_,basestring) for _ in post_lines] ): raise ValueError("Postlines must be a list of strings") input_txt += "\n".join(post_lines) + "\n\n" # Dump results to file right_open = "paropen" if self.get_withmpi() else "open" input_txt += "with {}('{}', 'w') as f:\n".format(right_open, self._OUTPUT_FILE_NAME) input_txt += " json.dump(results,f)" input_txt += "\n" # Dump trajectory if present if optimizer is not None: 
input_txt += "atoms.write('{}')\n".format(self._output_aseatoms) input_txt += "\n" # write all the input script to a file input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME) with open(input_filename,'w') as infile: infile.write(input_txt) # ============================ calcinfo ================================ # TODO: look at the qmmm infoL: it might be necessary to put # some singlefiles in the directory. # right now it has to be taken care in the pre_lines local_copy_list = [] remote_copy_list = [] additional_retrieve_list = settings_dict.pop("ADDITIONAL_RETRIEVE_LIST",[]) calcinfo = CalcInfo() calcinfo.uuid = self.uuid # Empty command line by default # calcinfo.cmdline_params = settings_dict.pop('CMDLINE', []) calcinfo.local_copy_list = local_copy_list calcinfo.remote_copy_list = remote_copy_list codeinfo = CodeInfo() codeinfo.cmdline_params = [self._INPUT_FILE_NAME] #calcinfo.stdin_name = self._INPUT_FILE_NAME codeinfo.stdout_name = self._TXT_OUTPUT_FILE_NAME codeinfo.code_uuid = code.uuid calcinfo.codes_info = [codeinfo] # Retrieve files calcinfo.retrieve_list = [] calcinfo.retrieve_list.append(self._OUTPUT_FILE_NAME) calcinfo.retrieve_list.append(self._output_aseatoms) calcinfo.retrieve_list += additional_retrieve_list # TODO: I should have two ways of running it: with gpaw-python in parallel # and executing python if in serial return calcinfo
[ "def", "_prepare_for_submission", "(", "self", ",", "tempfolder", ",", "inputdict", ")", ":", "try", ":", "code", "=", "inputdict", ".", "pop", "(", "self", ".", "get_linkname", "(", "'code'", ")", ")", "except", "KeyError", ":", "raise", "InputValidationErr...
This is the routine to be called when you want to create the input files and related stuff with a plugin. :param tempfolder: a aiida.common.folders.Folder subclass where the plugin should put all its files. :param inputdict: a dictionary with the input nodes, as they would be returned by get_inputdata_dict (without the Code!)
[ "This", "is", "the", "routine", "to", "be", "called", "when", "you", "want", "to", "create", "the", "input", "files", "and", "related", "stuff", "with", "a", "plugin", ".", ":", "param", "tempfolder", ":", "a", "aiida", ".", "common", ".", "folders", "...
train
https://github.com/aiidateam/aiida-ase/blob/688a01fa872717ee3babdb1f10405b306371cf44/aiida_ase/calculations/ase.py#L64-L365
daethnir/authprogs
setup.py
runcmd
def runcmd(command, command_input=None, cwd=None): """Run a command, potentially sending stdin, and capturing stdout/err.""" proc = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd) (stdout, stderr) = proc.communicate(command_input) if proc.returncode != 0: sys.stderr.write('ABORTING: command "%s" failed w/ code %s:\n' '%s\n%s' % (command, proc.returncode, stdout, stderr)) sys.exit(proc.returncode) return proc.returncode, stdout, stderr
python
def runcmd(command, command_input=None, cwd=None): """Run a command, potentially sending stdin, and capturing stdout/err.""" proc = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd) (stdout, stderr) = proc.communicate(command_input) if proc.returncode != 0: sys.stderr.write('ABORTING: command "%s" failed w/ code %s:\n' '%s\n%s' % (command, proc.returncode, stdout, stderr)) sys.exit(proc.returncode) return proc.returncode, stdout, stderr
[ "def", "runcmd", "(", "command", ",", "command_input", "=", "None", ",", "cwd", "=", "None", ")", ":", "proc", "=", "subprocess", ".", "Popen", "(", "command", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE",...
Run a command, potentially sending stdin, and capturing stdout/err.
[ "Run", "a", "command", "potentially", "sending", "stdin", "and", "capturing", "stdout", "/", "err", "." ]
train
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/setup.py#L33-L45
daethnir/authprogs
setup.py
Converter.dd_docs
def dd_docs(self): """Copy and convert various documentation files.""" top = os.path.join(os.path.dirname(__file__)) doc = os.path.join(top, 'doc') # Markdown to ronn to man page man_md = os.path.join(doc, 'authprogs.md') man_ronn = os.path.join(doc, 'authprogs.1.ronn') man_1 = os.path.join(doc, 'authprogs.1') # Create manpage try: if not os.path.exists(man_1): shutil.copy(man_md, man_ronn) self.created.append(man_ronn) retval = subprocess.call(['ronn', '-r', man_ronn]) if retval != 0: raise Exception('ronn man page conversion failed, ' 'returned %s' % retval) self.created.append(man_1) except: raise Exception('ronn required for manpage conversion - do you ' 'have it installed?') # Markdown files in docs dir get converted to .html for name in MARKDOWN2HTML: htmlfile = os.path.join(doc, '%s.html' % name) if os.path.exists(htmlfile): continue target = open(htmlfile, 'w') self.created.append(htmlfile) stdout = runcmd(['python', '-m', 'markdown', os.path.join(doc, '%s.md' % name)])[1] if not stdout: raise Exception('markdown conversion failed, no output.') target.write(stdout) target.close() # Markdown files in top level just get renamed sans .md for name in MARKDOWN2TEXT: target = os.path.join(top, name) if os.path.exists(target): continue source = os.path.join(top, '%s.md' % target) shutil.copy(source, target) self.created.append(target)
python
def dd_docs(self): """Copy and convert various documentation files.""" top = os.path.join(os.path.dirname(__file__)) doc = os.path.join(top, 'doc') # Markdown to ronn to man page man_md = os.path.join(doc, 'authprogs.md') man_ronn = os.path.join(doc, 'authprogs.1.ronn') man_1 = os.path.join(doc, 'authprogs.1') # Create manpage try: if not os.path.exists(man_1): shutil.copy(man_md, man_ronn) self.created.append(man_ronn) retval = subprocess.call(['ronn', '-r', man_ronn]) if retval != 0: raise Exception('ronn man page conversion failed, ' 'returned %s' % retval) self.created.append(man_1) except: raise Exception('ronn required for manpage conversion - do you ' 'have it installed?') # Markdown files in docs dir get converted to .html for name in MARKDOWN2HTML: htmlfile = os.path.join(doc, '%s.html' % name) if os.path.exists(htmlfile): continue target = open(htmlfile, 'w') self.created.append(htmlfile) stdout = runcmd(['python', '-m', 'markdown', os.path.join(doc, '%s.md' % name)])[1] if not stdout: raise Exception('markdown conversion failed, no output.') target.write(stdout) target.close() # Markdown files in top level just get renamed sans .md for name in MARKDOWN2TEXT: target = os.path.join(top, name) if os.path.exists(target): continue source = os.path.join(top, '%s.md' % target) shutil.copy(source, target) self.created.append(target)
[ "def", "dd_docs", "(", "self", ")", ":", "top", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "doc", "=", "os", ".", "path", ".", "join", "(", "top", ",", "'doc'", ")", "# Markdown to ronn...
Copy and convert various documentation files.
[ "Copy", "and", "convert", "various", "documentation", "files", "." ]
train
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/setup.py#L54-L100
daethnir/authprogs
setup.py
Converter.rm_docs
def rm_docs(self): """Remove converted docs.""" for filename in self.created: if os.path.exists(filename): os.unlink(filename)
python
def rm_docs(self): """Remove converted docs.""" for filename in self.created: if os.path.exists(filename): os.unlink(filename)
[ "def", "rm_docs", "(", "self", ")", ":", "for", "filename", "in", "self", ".", "created", ":", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "os", ".", "unlink", "(", "filename", ")" ]
Remove converted docs.
[ "Remove", "converted", "docs", "." ]
train
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/setup.py#L102-L106
radzak/rtv-downloader
rtv/extractors/rmf24.py
Rmf24.extract_entry
def extract_entry(scraped_info): """ Transform scraped_info dictionary into an entry, under the assumption that there is only one track in 'track' list, since each video/audio is instantiated individually on the RMF website and each of them is scraped independently, so there shouldn't be cases when there are 2 unrelated tracks in one info_dict. Args: scraped_info (dict): Video info dict, scraped straight from the website. Returns: dict: Entry containing title, formats (url, quality), thumbnail, etc. """ quality_mapping = { # ascending in terms of quality 'lo': 0, 'hi': 1 } entry = scraped_info['tracks'][0] ''' The structure of entry is as follows: 'src': { 'hi': [ { 'src': 'http://v.iplsc.com/30-11-gosc-marek-jakubiak/0007124B3CGCAE6P-A1.mp4', 'type': 'video/mp4' } ], 'lo': [ { 'src': 'http://v.iplsc.com/30-11-gosc-marek-jakubiak/0007124B3CGCAE6P-A1.mp4', 'type': 'video/mp4' } ] } ''' sources = entry.pop('src') # TODO: #LOW_PRIOR Remove date from title of audio files e.g. '10.06 Gość: Jarosław Gowin' formats = [] for src_name, src in sources.items(): url = src[0]['src'] formats.append({ 'url': url, 'quality': quality_mapping[src_name], 'ext': get_ext(url), 'width': int(scraped_info.get('width', 0)), 'height': int(scraped_info.get('height', 0)), }) # outer level url and ext come from the video of the lowest quality # you can access rest of the urls under 'formats' key worst_format = min(formats, key=lambda f: f['quality']) entry.update({ **entry.pop('data'), 'formats': formats, 'url': worst_format['url'], 'ext': worst_format['ext'] }) return entry
python
def extract_entry(scraped_info): """ Transform scraped_info dictionary into an entry, under the assumption that there is only one track in 'track' list, since each video/audio is instantiated individually on the RMF website and each of them is scraped independently, so there shouldn't be cases when there are 2 unrelated tracks in one info_dict. Args: scraped_info (dict): Video info dict, scraped straight from the website. Returns: dict: Entry containing title, formats (url, quality), thumbnail, etc. """ quality_mapping = { # ascending in terms of quality 'lo': 0, 'hi': 1 } entry = scraped_info['tracks'][0] ''' The structure of entry is as follows: 'src': { 'hi': [ { 'src': 'http://v.iplsc.com/30-11-gosc-marek-jakubiak/0007124B3CGCAE6P-A1.mp4', 'type': 'video/mp4' } ], 'lo': [ { 'src': 'http://v.iplsc.com/30-11-gosc-marek-jakubiak/0007124B3CGCAE6P-A1.mp4', 'type': 'video/mp4' } ] } ''' sources = entry.pop('src') # TODO: #LOW_PRIOR Remove date from title of audio files e.g. '10.06 Gość: Jarosław Gowin' formats = [] for src_name, src in sources.items(): url = src[0]['src'] formats.append({ 'url': url, 'quality': quality_mapping[src_name], 'ext': get_ext(url), 'width': int(scraped_info.get('width', 0)), 'height': int(scraped_info.get('height', 0)), }) # outer level url and ext come from the video of the lowest quality # you can access rest of the urls under 'formats' key worst_format = min(formats, key=lambda f: f['quality']) entry.update({ **entry.pop('data'), 'formats': formats, 'url': worst_format['url'], 'ext': worst_format['ext'] }) return entry
[ "def", "extract_entry", "(", "scraped_info", ")", ":", "quality_mapping", "=", "{", "# ascending in terms of quality", "'lo'", ":", "0", ",", "'hi'", ":", "1", "}", "entry", "=", "scraped_info", "[", "'tracks'", "]", "[", "0", "]", "'''\n The structure of...
Transform scraped_info dictionary into an entry, under the assumption that there is only one track in 'track' list, since each video/audio is instantiated individually on the RMF website and each of them is scraped independently, so there shouldn't be cases when there are 2 unrelated tracks in one info_dict. Args: scraped_info (dict): Video info dict, scraped straight from the website. Returns: dict: Entry containing title, formats (url, quality), thumbnail, etc.
[ "Transform", "scraped_info", "dictionary", "into", "an", "entry", "under", "the", "assumption", "that", "there", "is", "only", "one", "track", "in", "track", "list", "since", "each", "video", "/", "audio", "is", "instantiated", "individually", "on", "the", "RM...
train
https://github.com/radzak/rtv-downloader/blob/b9114b7f4c35fabe6ec9ad1764a65858667a866e/rtv/extractors/rmf24.py#L32-L96
theiviaxx/Frog
frog/views/comment.py
index
def index(request, obj_id): """Handles a request based on method and calls the appropriate function""" if request.method == 'GET': return get(request, obj_id) elif request.method == 'PUT': getPutData(request) return put(request, obj_id)
python
def index(request, obj_id): """Handles a request based on method and calls the appropriate function""" if request.method == 'GET': return get(request, obj_id) elif request.method == 'PUT': getPutData(request) return put(request, obj_id)
[ "def", "index", "(", "request", ",", "obj_id", ")", ":", "if", "request", ".", "method", "==", "'GET'", ":", "return", "get", "(", "request", ",", "obj_id", ")", "elif", "request", ".", "method", "==", "'PUT'", ":", "getPutData", "(", "request", ")", ...
Handles a request based on method and calls the appropriate function
[ "Handles", "a", "request", "based", "on", "method", "and", "calls", "the", "appropriate", "function" ]
train
https://github.com/theiviaxx/Frog/blob/a9475463a8eed1323fe3ef5d51f9751fb1dc9edd/frog/views/comment.py#L49-L55
theiviaxx/Frog
frog/views/comment.py
get
def get(request, obj_id): """Returns a serialized object :param obj_id: ID of comment object :type obj_id: int :returns: json """ res = Result() c = Comment.objects.get(pk=obj_id) res.append(commentToJson(c)) return JsonResponse(res.asDict())
python
def get(request, obj_id): """Returns a serialized object :param obj_id: ID of comment object :type obj_id: int :returns: json """ res = Result() c = Comment.objects.get(pk=obj_id) res.append(commentToJson(c)) return JsonResponse(res.asDict())
[ "def", "get", "(", "request", ",", "obj_id", ")", ":", "res", "=", "Result", "(", ")", "c", "=", "Comment", ".", "objects", ".", "get", "(", "pk", "=", "obj_id", ")", "res", ".", "append", "(", "commentToJson", "(", "c", ")", ")", "return", "Json...
Returns a serialized object :param obj_id: ID of comment object :type obj_id: int :returns: json
[ "Returns", "a", "serialized", "object", ":", "param", "obj_id", ":", "ID", "of", "comment", "object", ":", "type", "obj_id", ":", "int", ":", "returns", ":", "json" ]
train
https://github.com/theiviaxx/Frog/blob/a9475463a8eed1323fe3ef5d51f9751fb1dc9edd/frog/views/comment.py#L58-L68
theiviaxx/Frog
frog/views/comment.py
post
def post(request): """Returns a serialized object""" data = request.POST or json.loads(request.body)['body'] guid = data.get('guid', None) res = Result() if guid: obj = getObjectsFromGuids([guid,])[0] comment = Comment() comment.comment = data.get('comment', 'No comment') comment.user = request.user comment.user_name = request.user.get_full_name() comment.user_email = request.user.email comment.content_object = obj # For our purposes, we never have more than one site comment.site_id = 1 comment.save() obj.comment_count += 1 obj.save() emailComment(comment, obj, request) res.append(commentToJson(comment)) return JsonResponse(res.asDict())
python
def post(request): """Returns a serialized object""" data = request.POST or json.loads(request.body)['body'] guid = data.get('guid', None) res = Result() if guid: obj = getObjectsFromGuids([guid,])[0] comment = Comment() comment.comment = data.get('comment', 'No comment') comment.user = request.user comment.user_name = request.user.get_full_name() comment.user_email = request.user.email comment.content_object = obj # For our purposes, we never have more than one site comment.site_id = 1 comment.save() obj.comment_count += 1 obj.save() emailComment(comment, obj, request) res.append(commentToJson(comment)) return JsonResponse(res.asDict())
[ "def", "post", "(", "request", ")", ":", "data", "=", "request", ".", "POST", "or", "json", ".", "loads", "(", "request", ".", "body", ")", "[", "'body'", "]", "guid", "=", "data", ".", "get", "(", "'guid'", ",", "None", ")", "res", "=", "Result"...
Returns a serialized object
[ "Returns", "a", "serialized", "object" ]
train
https://github.com/theiviaxx/Frog/blob/a9475463a8eed1323fe3ef5d51f9751fb1dc9edd/frog/views/comment.py#L72-L97
theiviaxx/Frog
frog/views/comment.py
put
def put(request, obj_id): """Updates the content of a comment :param obj_id: ID of comment object :type obj_id: int :returns: json """ res = Result() c = Comment.objects.get(pk=obj_id) data = request.PUT or json.loads(request.body)['body'] content = data.get('comment', None) if content: c.comment = content c.save() res.append(commentToJson(c)) return JsonResponse(res.asDict())
python
def put(request, obj_id): """Updates the content of a comment :param obj_id: ID of comment object :type obj_id: int :returns: json """ res = Result() c = Comment.objects.get(pk=obj_id) data = request.PUT or json.loads(request.body)['body'] content = data.get('comment', None) if content: c.comment = content c.save() res.append(commentToJson(c)) return JsonResponse(res.asDict())
[ "def", "put", "(", "request", ",", "obj_id", ")", ":", "res", "=", "Result", "(", ")", "c", "=", "Comment", ".", "objects", ".", "get", "(", "pk", "=", "obj_id", ")", "data", "=", "request", ".", "PUT", "or", "json", ".", "loads", "(", "request",...
Updates the content of a comment :param obj_id: ID of comment object :type obj_id: int :returns: json
[ "Updates", "the", "content", "of", "a", "comment", ":", "param", "obj_id", ":", "ID", "of", "comment", "object", ":", "type", "obj_id", ":", "int", ":", "returns", ":", "json" ]
train
https://github.com/theiviaxx/Frog/blob/a9475463a8eed1323fe3ef5d51f9751fb1dc9edd/frog/views/comment.py#L101-L117
theiviaxx/Frog
frog/views/comment.py
commentList
def commentList(request): """Returns a rendered list of comments :returns: html """ if request.method == 'POST': return post(request) comments = [] guid = request.GET.get('guid', None) if guid: obj = getObjectsFromGuids([guid])[0] if obj.AssetType == 1: model = 'image' else: model = 'video' contenttype = ContentType.objects.get(app_label="frog", model=model) comments = Comment.objects.filter(object_pk=obj.id, content_type=contenttype) res = Result() for comment in comments: res.append(commentToJson(comment)) return JsonResponse(res.asDict())
python
def commentList(request): """Returns a rendered list of comments :returns: html """ if request.method == 'POST': return post(request) comments = [] guid = request.GET.get('guid', None) if guid: obj = getObjectsFromGuids([guid])[0] if obj.AssetType == 1: model = 'image' else: model = 'video' contenttype = ContentType.objects.get(app_label="frog", model=model) comments = Comment.objects.filter(object_pk=obj.id, content_type=contenttype) res = Result() for comment in comments: res.append(commentToJson(comment)) return JsonResponse(res.asDict())
[ "def", "commentList", "(", "request", ")", ":", "if", "request", ".", "method", "==", "'POST'", ":", "return", "post", "(", "request", ")", "comments", "=", "[", "]", "guid", "=", "request", ".", "GET", ".", "get", "(", "'guid'", ",", "None", ")", ...
Returns a rendered list of comments :returns: html
[ "Returns", "a", "rendered", "list", "of", "comments", ":", "returns", ":", "html" ]
train
https://github.com/theiviaxx/Frog/blob/a9475463a8eed1323fe3ef5d51f9751fb1dc9edd/frog/views/comment.py#L121-L143
theiviaxx/Frog
frog/views/comment.py
emailComment
def emailComment(comment, obj, request): """Send an email to the author about a new comment""" if not obj.author.frog_prefs.get().json()['emailComments']: return if obj.author == request.user: return html = render_to_string('frog/comment_email.html', { 'user': comment.user, 'comment': comment.comment, 'object': obj, 'action_type': 'commented on', 'image': isinstance(obj, Image), 'SITE_URL': FROG_SITE_URL, }) subject = '{}: Comment from {}'.format(getSiteConfig()['name'], comment.user_name) fromemail = comment.user_email to = obj.author.email text_content = 'This is an important message.' html_content = html send_mail(subject, text_content, fromemail, [to], html_message=html_content)
python
def emailComment(comment, obj, request): """Send an email to the author about a new comment""" if not obj.author.frog_prefs.get().json()['emailComments']: return if obj.author == request.user: return html = render_to_string('frog/comment_email.html', { 'user': comment.user, 'comment': comment.comment, 'object': obj, 'action_type': 'commented on', 'image': isinstance(obj, Image), 'SITE_URL': FROG_SITE_URL, }) subject = '{}: Comment from {}'.format(getSiteConfig()['name'], comment.user_name) fromemail = comment.user_email to = obj.author.email text_content = 'This is an important message.' html_content = html send_mail(subject, text_content, fromemail, [to], html_message=html_content)
[ "def", "emailComment", "(", "comment", ",", "obj", ",", "request", ")", ":", "if", "not", "obj", ".", "author", ".", "frog_prefs", ".", "get", "(", ")", ".", "json", "(", ")", "[", "'emailComments'", "]", ":", "return", "if", "obj", ".", "author", ...
Send an email to the author about a new comment
[ "Send", "an", "email", "to", "the", "author", "about", "a", "new", "comment" ]
train
https://github.com/theiviaxx/Frog/blob/a9475463a8eed1323fe3ef5d51f9751fb1dc9edd/frog/views/comment.py#L146-L169
gmr/tredis
tredis/cluster.py
ClusterMixin.cluster_nodes
def cluster_nodes(self): """Each node in a Redis Cluster has its view of the current cluster configuration, given by the set of known nodes, the state of the connection we have with such nodes, their flags, properties and assigned slots, and so forth. ``CLUSTER NODES`` provides all this information, that is, the current cluster configuration of the node we are contacting, in a serialization format which happens to be exactly the same as the one used by Redis Cluster itself in order to store on disk the cluster state (however the on disk cluster state has a few additional info appended at the end). Note that normally clients willing to fetch the map between Cluster hash slots and node addresses should use ``CLUSTER SLOTS`` instead. ``CLUSTER NODES``, that provides more information, should be used for administrative tasks, debugging, and configuration inspections. It is also used by ``redis-trib`` in order to manage a cluster. .. versionadded:: 0.7.0 :rtype: list(:class:`~tredis.cluster.ClusterNode`) :raises: :exc:`~tredis.exceptions.RedisError` """ def format_response(result): values = [] for row in result.decode('utf-8').split('\n'): if not row: continue parts = row.split(' ') slots = [] for slot in parts[8:]: if '-' in slot: sparts = slot.split('-') slots.append((int(sparts[0]), int(sparts[1]))) else: slots.append((int(slot), int(slot))) ip_port = common.split_connection_host_port(parts[1]) values.append( ClusterNode(parts[0], ip_port[0], ip_port[1], parts[2], parts[3], int(parts[4]), int(parts[5]), int(parts[6]), parts[7], slots)) return values return self._execute( ['CLUSTER', 'NODES'], format_callback=format_response)
python
def cluster_nodes(self): """Each node in a Redis Cluster has its view of the current cluster configuration, given by the set of known nodes, the state of the connection we have with such nodes, their flags, properties and assigned slots, and so forth. ``CLUSTER NODES`` provides all this information, that is, the current cluster configuration of the node we are contacting, in a serialization format which happens to be exactly the same as the one used by Redis Cluster itself in order to store on disk the cluster state (however the on disk cluster state has a few additional info appended at the end). Note that normally clients willing to fetch the map between Cluster hash slots and node addresses should use ``CLUSTER SLOTS`` instead. ``CLUSTER NODES``, that provides more information, should be used for administrative tasks, debugging, and configuration inspections. It is also used by ``redis-trib`` in order to manage a cluster. .. versionadded:: 0.7.0 :rtype: list(:class:`~tredis.cluster.ClusterNode`) :raises: :exc:`~tredis.exceptions.RedisError` """ def format_response(result): values = [] for row in result.decode('utf-8').split('\n'): if not row: continue parts = row.split(' ') slots = [] for slot in parts[8:]: if '-' in slot: sparts = slot.split('-') slots.append((int(sparts[0]), int(sparts[1]))) else: slots.append((int(slot), int(slot))) ip_port = common.split_connection_host_port(parts[1]) values.append( ClusterNode(parts[0], ip_port[0], ip_port[1], parts[2], parts[3], int(parts[4]), int(parts[5]), int(parts[6]), parts[7], slots)) return values return self._execute( ['CLUSTER', 'NODES'], format_callback=format_response)
[ "def", "cluster_nodes", "(", "self", ")", ":", "def", "format_response", "(", "result", ")", ":", "values", "=", "[", "]", "for", "row", "in", "result", ".", "decode", "(", "'utf-8'", ")", ".", "split", "(", "'\\n'", ")", ":", "if", "not", "row", "...
Each node in a Redis Cluster has its view of the current cluster configuration, given by the set of known nodes, the state of the connection we have with such nodes, their flags, properties and assigned slots, and so forth. ``CLUSTER NODES`` provides all this information, that is, the current cluster configuration of the node we are contacting, in a serialization format which happens to be exactly the same as the one used by Redis Cluster itself in order to store on disk the cluster state (however the on disk cluster state has a few additional info appended at the end). Note that normally clients willing to fetch the map between Cluster hash slots and node addresses should use ``CLUSTER SLOTS`` instead. ``CLUSTER NODES``, that provides more information, should be used for administrative tasks, debugging, and configuration inspections. It is also used by ``redis-trib`` in order to manage a cluster. .. versionadded:: 0.7.0 :rtype: list(:class:`~tredis.cluster.ClusterNode`) :raises: :exc:`~tredis.exceptions.RedisError`
[ "Each", "node", "in", "a", "Redis", "Cluster", "has", "its", "view", "of", "the", "current", "cluster", "configuration", "given", "by", "the", "set", "of", "known", "nodes", "the", "state", "of", "the", "connection", "we", "have", "with", "such", "nodes", ...
train
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/cluster.py#L125-L171
gmr/tredis
tredis/sets.py
SetsMixin.sadd
def sadd(self, key, *members): """Add the specified members to the set stored at key. Specified members that are already a member of this set are ignored. If key does not exist, a new set is created before adding the specified members. An error is returned when the value stored at key is not a set. Returns :data:`True` if all requested members are added. If more than one member is passed in and not all members are added, the number of added members is returned. .. note:: **Time complexity**: ``O(N)`` where ``N`` is the number of members to be added. :param key: The key of the set :type key: :class:`str`, :class:`bytes` :param members: One or more positional arguments to add to the set :type key: :class:`str`, :class:`bytes` :returns: Number of items added to the set :rtype: bool, int """ return self._execute([b'SADD', key] + list(members), len(members))
python
def sadd(self, key, *members): """Add the specified members to the set stored at key. Specified members that are already a member of this set are ignored. If key does not exist, a new set is created before adding the specified members. An error is returned when the value stored at key is not a set. Returns :data:`True` if all requested members are added. If more than one member is passed in and not all members are added, the number of added members is returned. .. note:: **Time complexity**: ``O(N)`` where ``N`` is the number of members to be added. :param key: The key of the set :type key: :class:`str`, :class:`bytes` :param members: One or more positional arguments to add to the set :type key: :class:`str`, :class:`bytes` :returns: Number of items added to the set :rtype: bool, int """ return self._execute([b'SADD', key] + list(members), len(members))
[ "def", "sadd", "(", "self", ",", "key", ",", "*", "members", ")", ":", "return", "self", ".", "_execute", "(", "[", "b'SADD'", ",", "key", "]", "+", "list", "(", "members", ")", ",", "len", "(", "members", ")", ")" ]
Add the specified members to the set stored at key. Specified members that are already a member of this set are ignored. If key does not exist, a new set is created before adding the specified members. An error is returned when the value stored at key is not a set. Returns :data:`True` if all requested members are added. If more than one member is passed in and not all members are added, the number of added members is returned. .. note:: **Time complexity**: ``O(N)`` where ``N`` is the number of members to be added. :param key: The key of the set :type key: :class:`str`, :class:`bytes` :param members: One or more positional arguments to add to the set :type key: :class:`str`, :class:`bytes` :returns: Number of items added to the set :rtype: bool, int
[ "Add", "the", "specified", "members", "to", "the", "set", "stored", "at", "key", ".", "Specified", "members", "that", "are", "already", "a", "member", "of", "this", "set", "are", "ignored", ".", "If", "key", "does", "not", "exist", "a", "new", "set", "...
train
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/sets.py#L12-L36
gmr/tredis
tredis/sets.py
SetsMixin.sdiffstore
def sdiffstore(self, destination, *keys): """This command is equal to :meth:`~tredis.RedisClient.sdiff`, but instead of returning the resulting set, it is stored in destination. If destination already exists, it is overwritten. .. note:: **Time complexity**: ``O(N)`` where ``N`` is the total number of elements in all given sets. :param destination: The set to store the diff into :type destination: :class:`str`, :class:`bytes` :param keys: One or more set keys as positional arguments :type keys: :class:`str`, :class:`bytes` :rtype: int :raises: :exc:`~tredis.exceptions.RedisError` """ return self._execute([b'SDIFFSTORE', destination] + list(keys))
python
def sdiffstore(self, destination, *keys): """This command is equal to :meth:`~tredis.RedisClient.sdiff`, but instead of returning the resulting set, it is stored in destination. If destination already exists, it is overwritten. .. note:: **Time complexity**: ``O(N)`` where ``N`` is the total number of elements in all given sets. :param destination: The set to store the diff into :type destination: :class:`str`, :class:`bytes` :param keys: One or more set keys as positional arguments :type keys: :class:`str`, :class:`bytes` :rtype: int :raises: :exc:`~tredis.exceptions.RedisError` """ return self._execute([b'SDIFFSTORE', destination] + list(keys))
[ "def", "sdiffstore", "(", "self", ",", "destination", ",", "*", "keys", ")", ":", "return", "self", ".", "_execute", "(", "[", "b'SDIFFSTORE'", ",", "destination", "]", "+", "list", "(", "keys", ")", ")" ]
This command is equal to :meth:`~tredis.RedisClient.sdiff`, but instead of returning the resulting set, it is stored in destination. If destination already exists, it is overwritten. .. note:: **Time complexity**: ``O(N)`` where ``N`` is the total number of elements in all given sets. :param destination: The set to store the diff into :type destination: :class:`str`, :class:`bytes` :param keys: One or more set keys as positional arguments :type keys: :class:`str`, :class:`bytes` :rtype: int :raises: :exc:`~tredis.exceptions.RedisError`
[ "This", "command", "is", "equal", "to", ":", "meth", ":", "~tredis", ".", "RedisClient", ".", "sdiff", "but", "instead", "of", "returning", "the", "resulting", "set", "it", "is", "stored", "in", "destination", "." ]
train
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/sets.py#L82-L101
gmr/tredis
tredis/sets.py
SetsMixin.sinterstore
def sinterstore(self, destination, *keys): """This command is equal to :meth:`~tredis.RedisClient.sinter`, but instead of returning the resulting set, it is stored in destination. If destination already exists, it is overwritten. .. note:: **Time complexity**: ``O(N*M)`` worst case where ``N`` is the cardinality of the smallest set and ``M`` is the number of sets. :param destination: The set to store the intersection into :type destination: :class:`str`, :class:`bytes` :param keys: One or more set keys as positional arguments :type keys: :class:`str`, :class:`bytes` :rtype: int :raises: :exc:`~tredis.exceptions.RedisError` """ return self._execute([b'SINTERSTORE', destination] + list(keys))
python
def sinterstore(self, destination, *keys): """This command is equal to :meth:`~tredis.RedisClient.sinter`, but instead of returning the resulting set, it is stored in destination. If destination already exists, it is overwritten. .. note:: **Time complexity**: ``O(N*M)`` worst case where ``N`` is the cardinality of the smallest set and ``M`` is the number of sets. :param destination: The set to store the intersection into :type destination: :class:`str`, :class:`bytes` :param keys: One or more set keys as positional arguments :type keys: :class:`str`, :class:`bytes` :rtype: int :raises: :exc:`~tredis.exceptions.RedisError` """ return self._execute([b'SINTERSTORE', destination] + list(keys))
[ "def", "sinterstore", "(", "self", ",", "destination", ",", "*", "keys", ")", ":", "return", "self", ".", "_execute", "(", "[", "b'SINTERSTORE'", ",", "destination", "]", "+", "list", "(", "keys", ")", ")" ]
This command is equal to :meth:`~tredis.RedisClient.sinter`, but instead of returning the resulting set, it is stored in destination. If destination already exists, it is overwritten. .. note:: **Time complexity**: ``O(N*M)`` worst case where ``N`` is the cardinality of the smallest set and ``M`` is the number of sets. :param destination: The set to store the intersection into :type destination: :class:`str`, :class:`bytes` :param keys: One or more set keys as positional arguments :type keys: :class:`str`, :class:`bytes` :rtype: int :raises: :exc:`~tredis.exceptions.RedisError`
[ "This", "command", "is", "equal", "to", ":", "meth", ":", "~tredis", ".", "RedisClient", ".", "sinter", "but", "instead", "of", "returning", "the", "resulting", "set", "it", "is", "stored", "in", "destination", "." ]
train
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/sets.py#L133-L152
gmr/tredis
tredis/sets.py
SetsMixin.smove
def smove(self, source, destination, member): """Move member from the set at source to the set at destination. This operation is atomic. In every given moment the element will appear to be a member of source or destination for other clients. If the source set does not exist or does not contain the specified element, no operation is performed and :data:`False` is returned. Otherwise, the element is removed from the source set and added to the destination set. When the specified element already exists in the destination set, it is only removed from the source set. An error is returned if source or destination does not hold a set value. .. note:: **Time complexity**: ``O(1)`` :param source: The source set key :type source: :class:`str`, :class:`bytes` :param destination: The destination set key :type destination: :class:`str`, :class:`bytes` :param member: The member value to move :type member: :class:`str`, :class:`bytes` :rtype: bool :raises: :exc:`~tredis.exceptions.RedisError` """ return self._execute([b'SMOVE', source, destination, member], 1)
python
def smove(self, source, destination, member): """Move member from the set at source to the set at destination. This operation is atomic. In every given moment the element will appear to be a member of source or destination for other clients. If the source set does not exist or does not contain the specified element, no operation is performed and :data:`False` is returned. Otherwise, the element is removed from the source set and added to the destination set. When the specified element already exists in the destination set, it is only removed from the source set. An error is returned if source or destination does not hold a set value. .. note:: **Time complexity**: ``O(1)`` :param source: The source set key :type source: :class:`str`, :class:`bytes` :param destination: The destination set key :type destination: :class:`str`, :class:`bytes` :param member: The member value to move :type member: :class:`str`, :class:`bytes` :rtype: bool :raises: :exc:`~tredis.exceptions.RedisError` """ return self._execute([b'SMOVE', source, destination, member], 1)
[ "def", "smove", "(", "self", ",", "source", ",", "destination", ",", "member", ")", ":", "return", "self", ".", "_execute", "(", "[", "b'SMOVE'", ",", "source", ",", "destination", ",", "member", "]", ",", "1", ")" ]
Move member from the set at source to the set at destination. This operation is atomic. In every given moment the element will appear to be a member of source or destination for other clients. If the source set does not exist or does not contain the specified element, no operation is performed and :data:`False` is returned. Otherwise, the element is removed from the source set and added to the destination set. When the specified element already exists in the destination set, it is only removed from the source set. An error is returned if source or destination does not hold a set value. .. note:: **Time complexity**: ``O(1)`` :param source: The source set key :type source: :class:`str`, :class:`bytes` :param destination: The destination set key :type destination: :class:`str`, :class:`bytes` :param member: The member value to move :type member: :class:`str`, :class:`bytes` :rtype: bool :raises: :exc:`~tredis.exceptions.RedisError`
[ "Move", "member", "from", "the", "set", "at", "source", "to", "the", "set", "at", "destination", ".", "This", "operation", "is", "atomic", ".", "In", "every", "given", "moment", "the", "element", "will", "appear", "to", "be", "a", "member", "of", "source...
train
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/sets.py#L190-L218
gmr/tredis
tredis/sets.py
SetsMixin.spop
def spop(self, key, count=None): """Removes and returns one or more random elements from the set value store at key. This operation is similar to :meth:`~tredis.RedisClient.srandmember`, that returns one or more random elements from a set but does not remove it. The count argument will be available in a later version and is not available in 2.6, 2.8, 3.0 Redis 3.2 will be the first version where an optional count argument can be passed to :meth:`~tredis.RedisClient.spop` in order to retrieve multiple elements in a single call. The implementation is already available in the unstable branch. .. note:: **Time complexity**: Without the count argument ``O(1)``, otherwise ``O(N)`` where ``N`` is the absolute value of the passed count. :param key: The key to get one or more random members from :type key: :class:`str`, :class:`bytes` :param int count: The number of members to return :rtype: bytes, list :raises: :exc:`~tredis.exceptions.RedisError` """ command = [b'SPOP', key] if count: # pragma: nocover command.append(ascii(count).encode('ascii')) return self._execute(command)
python
def spop(self, key, count=None): """Removes and returns one or more random elements from the set value store at key. This operation is similar to :meth:`~tredis.RedisClient.srandmember`, that returns one or more random elements from a set but does not remove it. The count argument will be available in a later version and is not available in 2.6, 2.8, 3.0 Redis 3.2 will be the first version where an optional count argument can be passed to :meth:`~tredis.RedisClient.spop` in order to retrieve multiple elements in a single call. The implementation is already available in the unstable branch. .. note:: **Time complexity**: Without the count argument ``O(1)``, otherwise ``O(N)`` where ``N`` is the absolute value of the passed count. :param key: The key to get one or more random members from :type key: :class:`str`, :class:`bytes` :param int count: The number of members to return :rtype: bytes, list :raises: :exc:`~tredis.exceptions.RedisError` """ command = [b'SPOP', key] if count: # pragma: nocover command.append(ascii(count).encode('ascii')) return self._execute(command)
[ "def", "spop", "(", "self", ",", "key", ",", "count", "=", "None", ")", ":", "command", "=", "[", "b'SPOP'", ",", "key", "]", "if", "count", ":", "# pragma: nocover", "command", ".", "append", "(", "ascii", "(", "count", ")", ".", "encode", "(", "'...
Removes and returns one or more random elements from the set value store at key. This operation is similar to :meth:`~tredis.RedisClient.srandmember`, that returns one or more random elements from a set but does not remove it. The count argument will be available in a later version and is not available in 2.6, 2.8, 3.0 Redis 3.2 will be the first version where an optional count argument can be passed to :meth:`~tredis.RedisClient.spop` in order to retrieve multiple elements in a single call. The implementation is already available in the unstable branch. .. note:: **Time complexity**: Without the count argument ``O(1)``, otherwise ``O(N)`` where ``N`` is the absolute value of the passed count. :param key: The key to get one or more random members from :type key: :class:`str`, :class:`bytes` :param int count: The number of members to return :rtype: bytes, list :raises: :exc:`~tredis.exceptions.RedisError`
[ "Removes", "and", "returns", "one", "or", "more", "random", "elements", "from", "the", "set", "value", "store", "at", "key", "." ]
train
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/sets.py#L220-L251
gmr/tredis
tredis/sets.py
SetsMixin.srandmember
def srandmember(self, key, count=None): """When called with just the key argument, return a random element from the set value stored at key. Starting from Redis version 2.6, when called with the additional count argument, return an array of count distinct elements if count is positive. If called with a negative count the behavior changes and the command is allowed to return the same element multiple times. In this case the number of returned elements is the absolute value of the specified count. When called with just the key argument, the operation is similar to :meth:`~tredis.RedisClient.spop`, however while :meth:`~tredis.RedisClient.spop` also removes the randomly selected element from the set, :meth:`~tredis.RedisClient.srandmember` will just return a random element without altering the original set in any way. .. note:: **Time complexity**: Without the count argument ``O(1)``, otherwise ``O(N)`` where ``N`` is the absolute value of the passed count. :param key: The key to get one or more random members from :type key: :class:`str`, :class:`bytes` :param int count: The number of members to return :rtype: bytes, list :raises: :exc:`~tredis.exceptions.RedisError` """ command = [b'SRANDMEMBER', key] if count: command.append(ascii(count).encode('ascii')) return self._execute(command)
python
def srandmember(self, key, count=None): """When called with just the key argument, return a random element from the set value stored at key. Starting from Redis version 2.6, when called with the additional count argument, return an array of count distinct elements if count is positive. If called with a negative count the behavior changes and the command is allowed to return the same element multiple times. In this case the number of returned elements is the absolute value of the specified count. When called with just the key argument, the operation is similar to :meth:`~tredis.RedisClient.spop`, however while :meth:`~tredis.RedisClient.spop` also removes the randomly selected element from the set, :meth:`~tredis.RedisClient.srandmember` will just return a random element without altering the original set in any way. .. note:: **Time complexity**: Without the count argument ``O(1)``, otherwise ``O(N)`` where ``N`` is the absolute value of the passed count. :param key: The key to get one or more random members from :type key: :class:`str`, :class:`bytes` :param int count: The number of members to return :rtype: bytes, list :raises: :exc:`~tredis.exceptions.RedisError` """ command = [b'SRANDMEMBER', key] if count: command.append(ascii(count).encode('ascii')) return self._execute(command)
[ "def", "srandmember", "(", "self", ",", "key", ",", "count", "=", "None", ")", ":", "command", "=", "[", "b'SRANDMEMBER'", ",", "key", "]", "if", "count", ":", "command", ".", "append", "(", "ascii", "(", "count", ")", ".", "encode", "(", "'ascii'", ...
When called with just the key argument, return a random element from the set value stored at key. Starting from Redis version 2.6, when called with the additional count argument, return an array of count distinct elements if count is positive. If called with a negative count the behavior changes and the command is allowed to return the same element multiple times. In this case the number of returned elements is the absolute value of the specified count. When called with just the key argument, the operation is similar to :meth:`~tredis.RedisClient.spop`, however while :meth:`~tredis.RedisClient.spop` also removes the randomly selected element from the set, :meth:`~tredis.RedisClient.srandmember` will just return a random element without altering the original set in any way. .. note:: **Time complexity**: Without the count argument ``O(1)``, otherwise ``O(N)`` where ``N`` is the absolute value of the passed count. :param key: The key to get one or more random members from :type key: :class:`str`, :class:`bytes` :param int count: The number of members to return :rtype: bytes, list :raises: :exc:`~tredis.exceptions.RedisError`
[ "When", "called", "with", "just", "the", "key", "argument", "return", "a", "random", "element", "from", "the", "set", "value", "stored", "at", "key", "." ]
train
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/sets.py#L253-L285
gmr/tredis
tredis/sets.py
SetsMixin.srem
def srem(self, key, *members): """Remove the specified members from the set stored at key. Specified members that are not a member of this set are ignored. If key does not exist, it is treated as an empty set and this command returns ``0``. An error is returned when the value stored at key is not a set. Returns :data:`True` if all requested members are removed. If more than one member is passed in and not all members are removed, the number of removed members is returned. .. note:: **Time complexity**: ``O(N)`` where ``N`` is the number of members to be removed. :param key: The key to remove the member from :type key: :class:`str`, :class:`bytes` :param mixed members: One or more member values to remove :rtype: bool, int :raises: :exc:`~tredis.exceptions.RedisError` """ return self._execute([b'SREM', key] + list(members), len(members))
python
def srem(self, key, *members): """Remove the specified members from the set stored at key. Specified members that are not a member of this set are ignored. If key does not exist, it is treated as an empty set and this command returns ``0``. An error is returned when the value stored at key is not a set. Returns :data:`True` if all requested members are removed. If more than one member is passed in and not all members are removed, the number of removed members is returned. .. note:: **Time complexity**: ``O(N)`` where ``N`` is the number of members to be removed. :param key: The key to remove the member from :type key: :class:`str`, :class:`bytes` :param mixed members: One or more member values to remove :rtype: bool, int :raises: :exc:`~tredis.exceptions.RedisError` """ return self._execute([b'SREM', key] + list(members), len(members))
[ "def", "srem", "(", "self", ",", "key", ",", "*", "members", ")", ":", "return", "self", ".", "_execute", "(", "[", "b'SREM'", ",", "key", "]", "+", "list", "(", "members", ")", ",", "len", "(", "members", ")", ")" ]
Remove the specified members from the set stored at key. Specified members that are not a member of this set are ignored. If key does not exist, it is treated as an empty set and this command returns ``0``. An error is returned when the value stored at key is not a set. Returns :data:`True` if all requested members are removed. If more than one member is passed in and not all members are removed, the number of removed members is returned. .. note:: **Time complexity**: ``O(N)`` where ``N`` is the number of members to be removed. :param key: The key to remove the member from :type key: :class:`str`, :class:`bytes` :param mixed members: One or more member values to remove :rtype: bool, int :raises: :exc:`~tredis.exceptions.RedisError`
[ "Remove", "the", "specified", "members", "from", "the", "set", "stored", "at", "key", ".", "Specified", "members", "that", "are", "not", "a", "member", "of", "this", "set", "are", "ignored", ".", "If", "key", "does", "not", "exist", "it", "is", "treated"...
train
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/sets.py#L287-L310
gmr/tredis
tredis/sets.py
SetsMixin.sscan
def sscan(self, key, cursor=0, pattern=None, count=None): """The :meth:`~tredis.RedisClient.sscan` command and the closely related commands :meth:`~tredis.RedisClient.scan`, :meth:`~tredis.RedisClient.hscan` and :meth:`~tredis.RedisClient.zscan` are used in order to incrementally iterate over a collection of elements. - :meth:`~tredis.RedisClient.scan` iterates the set of keys in the currently selected Redis database. - :meth:`~tredis.RedisClient.sscan` iterates elements of Sets types. - :meth:`~tredis.RedisClient.hscan` iterates fields of Hash types and their associated values. - :meth:`~tredis.RedisClient.zscan` iterates elements of Sorted Set types and their associated scores. **Basic usage** :meth:`~tredis.RedisClient.sscan` is a cursor based iterator. This means that at every call of the command, the server returns an updated cursor that the user needs to use as the cursor argument in the next call. An iteration starts when the cursor is set to ``0``, and terminates when the cursor returned by the server is ``0``. For more information on :meth:`~tredis.RedisClient.scan`, visit the `Redis docs on scan <http://redis.io/commands/scan>`_. .. note:: **Time complexity**: ``O(1)`` for every call. ``O(N)`` for a complete iteration, including enough command calls for the cursor to return back to ``0``. ``N`` is the number of elements inside the collection. 
:param key: The key to scan :type key: :class:`str`, :class:`bytes` :param int cursor: The server specified cursor value or ``0`` :param pattern: An optional pattern to apply for key matching :type pattern: :class:`str`, :class:`bytes` :param int count: An optional amount of work to perform in the scan :rtype: int, list :returns: A tuple containing the cursor and the list of set items :raises: :exc:`~tredis.exceptions.RedisError` """ def format_response(value): """Format the response from redis :param tuple value: The return response from redis :rtype: tuple(int, list) """ return int(value[0]), value[1] command = [b'SSCAN', key, ascii(cursor).encode('ascii')] if pattern: command += [b'MATCH', pattern] if count: command += [b'COUNT', ascii(count).encode('ascii')] return self._execute(command, format_callback=format_response)
python
def sscan(self, key, cursor=0, pattern=None, count=None): """The :meth:`~tredis.RedisClient.sscan` command and the closely related commands :meth:`~tredis.RedisClient.scan`, :meth:`~tredis.RedisClient.hscan` and :meth:`~tredis.RedisClient.zscan` are used in order to incrementally iterate over a collection of elements. - :meth:`~tredis.RedisClient.scan` iterates the set of keys in the currently selected Redis database. - :meth:`~tredis.RedisClient.sscan` iterates elements of Sets types. - :meth:`~tredis.RedisClient.hscan` iterates fields of Hash types and their associated values. - :meth:`~tredis.RedisClient.zscan` iterates elements of Sorted Set types and their associated scores. **Basic usage** :meth:`~tredis.RedisClient.sscan` is a cursor based iterator. This means that at every call of the command, the server returns an updated cursor that the user needs to use as the cursor argument in the next call. An iteration starts when the cursor is set to ``0``, and terminates when the cursor returned by the server is ``0``. For more information on :meth:`~tredis.RedisClient.scan`, visit the `Redis docs on scan <http://redis.io/commands/scan>`_. .. note:: **Time complexity**: ``O(1)`` for every call. ``O(N)`` for a complete iteration, including enough command calls for the cursor to return back to ``0``. ``N`` is the number of elements inside the collection. 
:param key: The key to scan :type key: :class:`str`, :class:`bytes` :param int cursor: The server specified cursor value or ``0`` :param pattern: An optional pattern to apply for key matching :type pattern: :class:`str`, :class:`bytes` :param int count: An optional amount of work to perform in the scan :rtype: int, list :returns: A tuple containing the cursor and the list of set items :raises: :exc:`~tredis.exceptions.RedisError` """ def format_response(value): """Format the response from redis :param tuple value: The return response from redis :rtype: tuple(int, list) """ return int(value[0]), value[1] command = [b'SSCAN', key, ascii(cursor).encode('ascii')] if pattern: command += [b'MATCH', pattern] if count: command += [b'COUNT', ascii(count).encode('ascii')] return self._execute(command, format_callback=format_response)
[ "def", "sscan", "(", "self", ",", "key", ",", "cursor", "=", "0", ",", "pattern", "=", "None", ",", "count", "=", "None", ")", ":", "def", "format_response", "(", "value", ")", ":", "\"\"\"Format the response from redis\n\n :param tuple value: The retur...
The :meth:`~tredis.RedisClient.sscan` command and the closely related commands :meth:`~tredis.RedisClient.scan`, :meth:`~tredis.RedisClient.hscan` and :meth:`~tredis.RedisClient.zscan` are used in order to incrementally iterate over a collection of elements. - :meth:`~tredis.RedisClient.scan` iterates the set of keys in the currently selected Redis database. - :meth:`~tredis.RedisClient.sscan` iterates elements of Sets types. - :meth:`~tredis.RedisClient.hscan` iterates fields of Hash types and their associated values. - :meth:`~tredis.RedisClient.zscan` iterates elements of Sorted Set types and their associated scores. **Basic usage** :meth:`~tredis.RedisClient.sscan` is a cursor based iterator. This means that at every call of the command, the server returns an updated cursor that the user needs to use as the cursor argument in the next call. An iteration starts when the cursor is set to ``0``, and terminates when the cursor returned by the server is ``0``. For more information on :meth:`~tredis.RedisClient.scan`, visit the `Redis docs on scan <http://redis.io/commands/scan>`_. .. note:: **Time complexity**: ``O(1)`` for every call. ``O(N)`` for a complete iteration, including enough command calls for the cursor to return back to ``0``. ``N`` is the number of elements inside the collection. :param key: The key to scan :type key: :class:`str`, :class:`bytes` :param int cursor: The server specified cursor value or ``0`` :param pattern: An optional pattern to apply for key matching :type pattern: :class:`str`, :class:`bytes` :param int count: An optional amount of work to perform in the scan :rtype: int, list :returns: A tuple containing the cursor and the list of set items :raises: :exc:`~tredis.exceptions.RedisError`
[ "The", ":", "meth", ":", "~tredis", ".", "RedisClient", ".", "sscan", "command", "and", "the", "closely", "related", "commands", ":", "meth", ":", "~tredis", ".", "RedisClient", ".", "scan", ":", "meth", ":", "~tredis", ".", "RedisClient", ".", "hscan", ...
train
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/sets.py#L312-L373
gmr/tredis
tredis/sets.py
SetsMixin.sunionstore
def sunionstore(self, destination, *keys): """This command is equal to :meth:`~tredis.RedisClient.sunion`, but instead of returning the resulting set, it is stored in destination. If destination already exists, it is overwritten. .. note:: **Time complexity**: ``O(N)`` where ``N`` is the total number of elements in all given sets. :param destination: The set to store the union into :type destination: :class:`str`, :class:`bytes` :param keys: One or more set keys as positional arguments :type keys: :class:`str`, :class:`bytes` :rtype: int :raises: :exc:`~tredis.exceptions.RedisError` """ return self._execute([b'SUNIONSTORE', destination] + list(keys))
python
def sunionstore(self, destination, *keys): """This command is equal to :meth:`~tredis.RedisClient.sunion`, but instead of returning the resulting set, it is stored in destination. If destination already exists, it is overwritten. .. note:: **Time complexity**: ``O(N)`` where ``N`` is the total number of elements in all given sets. :param destination: The set to store the union into :type destination: :class:`str`, :class:`bytes` :param keys: One or more set keys as positional arguments :type keys: :class:`str`, :class:`bytes` :rtype: int :raises: :exc:`~tredis.exceptions.RedisError` """ return self._execute([b'SUNIONSTORE', destination] + list(keys))
[ "def", "sunionstore", "(", "self", ",", "destination", ",", "*", "keys", ")", ":", "return", "self", ".", "_execute", "(", "[", "b'SUNIONSTORE'", ",", "destination", "]", "+", "list", "(", "keys", ")", ")" ]
This command is equal to :meth:`~tredis.RedisClient.sunion`, but instead of returning the resulting set, it is stored in destination. If destination already exists, it is overwritten. .. note:: **Time complexity**: ``O(N)`` where ``N`` is the total number of elements in all given sets. :param destination: The set to store the union into :type destination: :class:`str`, :class:`bytes` :param keys: One or more set keys as positional arguments :type keys: :class:`str`, :class:`bytes` :rtype: int :raises: :exc:`~tredis.exceptions.RedisError`
[ "This", "command", "is", "equal", "to", ":", "meth", ":", "~tredis", ".", "RedisClient", ".", "sunion", "but", "instead", "of", "returning", "the", "resulting", "set", "it", "is", "stored", "in", "destination", "." ]
train
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/sets.py#L403-L422
radzak/rtv-downloader
rtv/extractors/tokfm.py
TokFm._extract_id
def _extract_id(self) -> str: """ Get video_id needed to obtain the real_url of the video. Raises: VideoIdNotMatchedError: If video_id is not matched with regular expression. """ match = re.match(self._VALID_URL, self.url) if match: return match.group('video_id') else: raise VideoIdNotMatchedError
python
def _extract_id(self) -> str: """ Get video_id needed to obtain the real_url of the video. Raises: VideoIdNotMatchedError: If video_id is not matched with regular expression. """ match = re.match(self._VALID_URL, self.url) if match: return match.group('video_id') else: raise VideoIdNotMatchedError
[ "def", "_extract_id", "(", "self", ")", "->", "str", ":", "match", "=", "re", ".", "match", "(", "self", ".", "_VALID_URL", ",", "self", ".", "url", ")", "if", "match", ":", "return", "match", ".", "group", "(", "'video_id'", ")", "else", ":", "rai...
Get video_id needed to obtain the real_url of the video. Raises: VideoIdNotMatchedError: If video_id is not matched with regular expression.
[ "Get", "video_id", "needed", "to", "obtain", "the", "real_url", "of", "the", "video", "." ]
train
https://github.com/radzak/rtv-downloader/blob/b9114b7f4c35fabe6ec9ad1764a65858667a866e/rtv/extractors/tokfm.py#L35-L48
radzak/rtv-downloader
rtv/extractors/tokfm.py
TokFm._process_info
def _process_info(raw_info: VideoInfo) -> VideoInfo: """Process raw information about the video (parse date, etc.).""" raw_date = raw_info.date date = datetime.strptime(raw_date, '%Y-%m-%d %H:%M') # 2018-04-05 17:00 video_info = raw_info._replace(date=date) return video_info
python
def _process_info(raw_info: VideoInfo) -> VideoInfo: """Process raw information about the video (parse date, etc.).""" raw_date = raw_info.date date = datetime.strptime(raw_date, '%Y-%m-%d %H:%M') # 2018-04-05 17:00 video_info = raw_info._replace(date=date) return video_info
[ "def", "_process_info", "(", "raw_info", ":", "VideoInfo", ")", "->", "VideoInfo", ":", "raw_date", "=", "raw_info", ".", "date", "date", "=", "datetime", ".", "strptime", "(", "raw_date", ",", "'%Y-%m-%d %H:%M'", ")", "# 2018-04-05 17:00", "video_info", "=", ...
Process raw information about the video (parse date, etc.).
[ "Process", "raw", "information", "about", "the", "video", "(", "parse", "date", "etc", ".", ")", "." ]
train
https://github.com/radzak/rtv-downloader/blob/b9114b7f4c35fabe6ec9ad1764a65858667a866e/rtv/extractors/tokfm.py#L51-L56
lobocv/pyperform
pyperform/tools.py
convert_time_units
def convert_time_units(t): """ Convert time in seconds into reasonable time units. """ if t == 0: return '0 s' order = log10(t) if -9 < order < -6: time_units = 'ns' factor = 1000000000 elif -6 <= order < -3: time_units = 'us' factor = 1000000 elif -3 <= order < -1: time_units = 'ms' factor = 1000. elif -1 <= order: time_units = 's' factor = 1 return "{:.3f} {}".format(factor * t, time_units)
python
def convert_time_units(t): """ Convert time in seconds into reasonable time units. """ if t == 0: return '0 s' order = log10(t) if -9 < order < -6: time_units = 'ns' factor = 1000000000 elif -6 <= order < -3: time_units = 'us' factor = 1000000 elif -3 <= order < -1: time_units = 'ms' factor = 1000. elif -1 <= order: time_units = 's' factor = 1 return "{:.3f} {}".format(factor * t, time_units)
[ "def", "convert_time_units", "(", "t", ")", ":", "if", "t", "==", "0", ":", "return", "'0 s'", "order", "=", "log10", "(", "t", ")", "if", "-", "9", "<", "order", "<", "-", "6", ":", "time_units", "=", "'ns'", "factor", "=", "1000000000", "elif", ...
Convert time in seconds into reasonable time units.
[ "Convert", "time", "in", "seconds", "into", "reasonable", "time", "units", "." ]
train
https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/tools.py#L17-L34
lobocv/pyperform
pyperform/tools.py
globalize_indentation
def globalize_indentation(src): """ Strip the indentation level so the code runs in the global scope. """ lines = src.splitlines() indent = len(lines[0]) - len(lines[0].strip(' ')) func_src = '' for ii, l in enumerate(src.splitlines()): line = l[indent:] func_src += line + '\n' return func_src
python
def globalize_indentation(src): """ Strip the indentation level so the code runs in the global scope. """ lines = src.splitlines() indent = len(lines[0]) - len(lines[0].strip(' ')) func_src = '' for ii, l in enumerate(src.splitlines()): line = l[indent:] func_src += line + '\n' return func_src
[ "def", "globalize_indentation", "(", "src", ")", ":", "lines", "=", "src", ".", "splitlines", "(", ")", "indent", "=", "len", "(", "lines", "[", "0", "]", ")", "-", "len", "(", "lines", "[", "0", "]", ".", "strip", "(", "' '", ")", ")", "func_src...
Strip the indentation level so the code runs in the global scope.
[ "Strip", "the", "indentation", "level", "so", "the", "code", "runs", "in", "the", "global", "scope", "." ]
train
https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/tools.py#L37-L45
lobocv/pyperform
pyperform/tools.py
remove_decorators
def remove_decorators(src): """ Remove decorators from the source code """ src = src.strip() src_lines = src.splitlines() multi_line = False n_deleted = 0 for n in range(len(src_lines)): line = src_lines[n - n_deleted].strip() if (line.startswith('@') and 'Benchmark' in line) or multi_line: del src_lines[n - n_deleted] n_deleted += 1 if line.endswith(')'): multi_line = False else: multi_line = True setup_src = '\n'.join(src_lines) return setup_src
python
def remove_decorators(src): """ Remove decorators from the source code """ src = src.strip() src_lines = src.splitlines() multi_line = False n_deleted = 0 for n in range(len(src_lines)): line = src_lines[n - n_deleted].strip() if (line.startswith('@') and 'Benchmark' in line) or multi_line: del src_lines[n - n_deleted] n_deleted += 1 if line.endswith(')'): multi_line = False else: multi_line = True setup_src = '\n'.join(src_lines) return setup_src
[ "def", "remove_decorators", "(", "src", ")", ":", "src", "=", "src", ".", "strip", "(", ")", "src_lines", "=", "src", ".", "splitlines", "(", ")", "multi_line", "=", "False", "n_deleted", "=", "0", "for", "n", "in", "range", "(", "len", "(", "src_lin...
Remove decorators from the source code
[ "Remove", "decorators", "from", "the", "source", "code" ]
train
https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/tools.py#L48-L64
lobocv/pyperform
pyperform/tools.py
walk_tree
def walk_tree(start, attr): """ Recursively walk through a tree relationship. This iterates a tree in a top-down approach, fully reaching the end of a lineage before moving onto the next sibling of that generation. """ path = [start] for child in path: yield child idx = path.index(child) for grandchild in reversed(getattr(child, attr)): path.insert(idx + 1, grandchild)
python
def walk_tree(start, attr): """ Recursively walk through a tree relationship. This iterates a tree in a top-down approach, fully reaching the end of a lineage before moving onto the next sibling of that generation. """ path = [start] for child in path: yield child idx = path.index(child) for grandchild in reversed(getattr(child, attr)): path.insert(idx + 1, grandchild)
[ "def", "walk_tree", "(", "start", ",", "attr", ")", ":", "path", "=", "[", "start", "]", "for", "child", "in", "path", ":", "yield", "child", "idx", "=", "path", ".", "index", "(", "child", ")", "for", "grandchild", "in", "reversed", "(", "getattr", ...
Recursively walk through a tree relationship. This iterates a tree in a top-down approach, fully reaching the end of a lineage before moving onto the next sibling of that generation.
[ "Recursively", "walk", "through", "a", "tree", "relationship", ".", "This", "iterates", "a", "tree", "in", "a", "top", "-", "down", "approach", "fully", "reaching", "the", "end", "of", "a", "lineage", "before", "moving", "onto", "the", "next", "sibling", "...
train
https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/tools.py#L137-L147
lobocv/pyperform
pyperform/benchmarkedclass.py
BenchmarkedClass.validate
def validate(self, benchmarks): """ Execute the code once to get it's results (to be used in function validation). Compare the result to the first function in the group. :param benchmarks: list of benchmarks to validate. """ class_code = self.setup_src instance_creation = '\ninstance = {}'.format(self.stmt) for i, benchmark in enumerate(benchmarks): if not benchmark.result_validation: break validation_code = class_code + instance_creation + '\nvalidation_result = ' + benchmark.stmt validation_scope = {} exec(validation_code, validation_scope) # Store the result in the first function in the group. if i == 0: compare_against_function = benchmarks[0].callable.__name__ compare_against_result = validation_scope['validation_result'] logging.info('PyPerform: Validating group "{b.group}" against method ' '"{b.classname}.{b.callable.__name__}"'.format(b=benchmarks[0])) else: if compare_against_result == validation_scope['validation_result']: logging.info('PyPerform: Validating {b.classname}.{b.callable.__name__}......PASSED!' .format(b=benchmark)) else: error = 'Results of functions {0} and {1} are not equivalent.\n{0}:\t {2}\n{1}:\t{3}' raise ValidationError(error.format(compare_against_function, benchmark.callable.__name__, compare_against_result, validation_scope['validation_result']))
python
def validate(self, benchmarks): """ Execute the code once to get it's results (to be used in function validation). Compare the result to the first function in the group. :param benchmarks: list of benchmarks to validate. """ class_code = self.setup_src instance_creation = '\ninstance = {}'.format(self.stmt) for i, benchmark in enumerate(benchmarks): if not benchmark.result_validation: break validation_code = class_code + instance_creation + '\nvalidation_result = ' + benchmark.stmt validation_scope = {} exec(validation_code, validation_scope) # Store the result in the first function in the group. if i == 0: compare_against_function = benchmarks[0].callable.__name__ compare_against_result = validation_scope['validation_result'] logging.info('PyPerform: Validating group "{b.group}" against method ' '"{b.classname}.{b.callable.__name__}"'.format(b=benchmarks[0])) else: if compare_against_result == validation_scope['validation_result']: logging.info('PyPerform: Validating {b.classname}.{b.callable.__name__}......PASSED!' .format(b=benchmark)) else: error = 'Results of functions {0} and {1} are not equivalent.\n{0}:\t {2}\n{1}:\t{3}' raise ValidationError(error.format(compare_against_function, benchmark.callable.__name__, compare_against_result, validation_scope['validation_result']))
[ "def", "validate", "(", "self", ",", "benchmarks", ")", ":", "class_code", "=", "self", ".", "setup_src", "instance_creation", "=", "'\\ninstance = {}'", ".", "format", "(", "self", ".", "stmt", ")", "for", "i", ",", "benchmark", "in", "enumerate", "(", "b...
Execute the code once to get it's results (to be used in function validation). Compare the result to the first function in the group. :param benchmarks: list of benchmarks to validate.
[ "Execute", "the", "code", "once", "to", "get", "it", "s", "results", "(", "to", "be", "used", "in", "function", "validation", ")", ".", "Compare", "the", "result", "to", "the", "first", "function", "in", "the", "group", ".", ":", "param", "benchmarks", ...
train
https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/benchmarkedclass.py#L36-L64
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.is_cache_valid
def is_cache_valid(self): ''' Determines if the cache files have expired, or if it is still valid ''' if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_index): return True return False
python
def is_cache_valid(self): ''' Determines if the cache files have expired, or if it is still valid ''' if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_index): return True return False
[ "def", "is_cache_valid", "(", "self", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "self", ".", "cache_path_cache", ")", ":", "mod_time", "=", "os", ".", "path", ".", "getmtime", "(", "self", ".", "cache_path_cache", ")", "current_time", "=", ...
Determines if the cache files have expired, or if it is still valid
[ "Determines", "if", "the", "cache", "files", "have", "expired", "or", "if", "it", "is", "still", "valid" ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L88-L99
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.read_settings
def read_settings(self, configfile): ''' Reads the settings from the ec2.ini file ''' if six.PY3: config = configparser.ConfigParser() else: config = configparser.SafeConfigParser() config.read(configfile) # is eucalyptus? self.eucalyptus_host = None self.eucalyptus = False if config.has_option('ec2', 'eucalyptus'): self.eucalyptus = config.getboolean('ec2', 'eucalyptus') if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'): self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') # Regions self.regions = [] configRegions = config.get('ec2', 'regions') configRegions_exclude = config.get('ec2', 'regions_exclude') if (configRegions == 'all'): if self.eucalyptus_host: self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name) else: for regionInfo in ec2.regions(): if regionInfo.name not in configRegions_exclude: self.regions.append(regionInfo.name) else: self.regions = configRegions.split(",") # Destination addresses self.destination_variable = config.get('ec2', 'destination_variable') self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') # Route53 self.route53_enabled = config.getboolean('ec2', 'route53') self.route53_excluded_zones = [] if config.has_option('ec2', 'route53_excluded_zones'): self.route53_excluded_zones.extend( config.get('ec2', 'route53_excluded_zones', '').split(',')) # Include RDS instances? self.rds_enabled = True if config.has_option('ec2', 'rds'): self.rds_enabled = config.getboolean('ec2', 'rds') # Include ElastiCache instances? self.elasticache_enabled = True if config.has_option('ec2', 'elasticache'): self.elasticache_enabled = config.getboolean('ec2', 'elasticache') # Return all EC2 instances? if config.has_option('ec2', 'all_instances'): self.all_instances = config.getboolean('ec2', 'all_instances') else: self.all_instances = False # Instance states to be gathered in inventory. Default is 'running'. # Setting 'all_instances' to 'yes' overrides this option. 
ec2_valid_instance_states = [ 'pending', 'running', 'shutting-down', 'terminated', 'stopping', 'stopped' ] self.ec2_instance_states = [] if self.all_instances: self.ec2_instance_states = ec2_valid_instance_states elif config.has_option('ec2', 'instance_states'): for instance_state in config.get('ec2', 'instance_states').split(','): instance_state = instance_state.strip() if instance_state not in ec2_valid_instance_states: continue self.ec2_instance_states.append(instance_state) else: self.ec2_instance_states = ['running'] # Return all RDS instances? (if RDS is enabled) if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') else: self.all_rds_instances = False # Return all ElastiCache replication groups? # (if ElastiCache is enabled) if (config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled): self.all_elasticache_replication_groups = config.getboolean( 'ec2', 'all_elasticache_replication_groups') else: self.all_elasticache_replication_groups = False # Return all ElastiCache clusters? (if ElastiCache is enabled) if (config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled): self.all_elasticache_clusters = config.getboolean( 'ec2', 'all_elasticache_clusters') else: self.all_elasticache_clusters = False # Return all ElastiCache nodes? 
(if ElastiCache is enabled) if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled: self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes') else: self.all_elasticache_nodes = False # boto configuration profile (prefer CLI argument) if config.has_option('ec2', 'boto_profile') and not self.boto_profile: self.boto_profile = config.get('ec2', 'boto_profile') # Cache related cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) if self.boto_profile: cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile) if not os.path.exists(cache_dir): os.makedirs(cache_dir) self.cache_path_cache = cache_dir + "/ansible-ec2.cache" self.cache_path_index = cache_dir + "/ansible-ec2.index" self.cache_max_age = config.getint('ec2', 'cache_max_age') if config.has_option('ec2', 'expand_csv_tags'): self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags') else: self.expand_csv_tags = False # Configure nested groups instead of flat namespace. if config.has_option('ec2', 'nested_groups'): self.nested_groups = config.getboolean('ec2', 'nested_groups') else: self.nested_groups = False # Replace dash or not in group names if config.has_option('ec2', 'replace_dash_in_groups'): self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups') else: self.replace_dash_in_groups = True # Configure which groups should be created. 
group_by_options = [ 'group_by_instance_id', 'group_by_region', 'group_by_availability_zone', 'group_by_ami_id', 'group_by_instance_type', 'group_by_key_pair', 'group_by_vpc_id', 'group_by_security_group', 'group_by_tag_keys', 'group_by_tag_none', 'group_by_route53_names', 'group_by_rds_engine', 'group_by_rds_parameter_group', 'group_by_elasticache_engine', 'group_by_elasticache_cluster', 'group_by_elasticache_parameter_group', 'group_by_elasticache_replication_group', ] for option in group_by_options: if config.has_option('ec2', option): setattr(self, option, config.getboolean('ec2', option)) else: setattr(self, option, True) # Do we need to just include hosts that match a pattern? try: pattern_include = config.get('ec2', 'pattern_include') if pattern_include and len(pattern_include) > 0: self.pattern_include = re.compile(pattern_include) else: self.pattern_include = None except configparser.NoOptionError: self.pattern_include = None # Do we need to exclude hosts that match a pattern? try: pattern_exclude = config.get('ec2', 'pattern_exclude'); if pattern_exclude and len(pattern_exclude) > 0: self.pattern_exclude = re.compile(pattern_exclude) else: self.pattern_exclude = None except configparser.NoOptionError: self.pattern_exclude = None # Instance filters (see boto and EC2 API docs). Ignore invalid filters. self.ec2_instance_filters = defaultdict(list) if config.has_option('ec2', 'instance_filters'): for instance_filter in config.get('ec2', 'instance_filters', '').split(','): instance_filter = instance_filter.strip() if not instance_filter or '=' not in instance_filter: continue filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] if not filter_key: continue self.ec2_instance_filters[filter_key].append(filter_value)
python
def read_settings(self, configfile): ''' Reads the settings from the ec2.ini file ''' if six.PY3: config = configparser.ConfigParser() else: config = configparser.SafeConfigParser() config.read(configfile) # is eucalyptus? self.eucalyptus_host = None self.eucalyptus = False if config.has_option('ec2', 'eucalyptus'): self.eucalyptus = config.getboolean('ec2', 'eucalyptus') if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'): self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') # Regions self.regions = [] configRegions = config.get('ec2', 'regions') configRegions_exclude = config.get('ec2', 'regions_exclude') if (configRegions == 'all'): if self.eucalyptus_host: self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name) else: for regionInfo in ec2.regions(): if regionInfo.name not in configRegions_exclude: self.regions.append(regionInfo.name) else: self.regions = configRegions.split(",") # Destination addresses self.destination_variable = config.get('ec2', 'destination_variable') self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') # Route53 self.route53_enabled = config.getboolean('ec2', 'route53') self.route53_excluded_zones = [] if config.has_option('ec2', 'route53_excluded_zones'): self.route53_excluded_zones.extend( config.get('ec2', 'route53_excluded_zones', '').split(',')) # Include RDS instances? self.rds_enabled = True if config.has_option('ec2', 'rds'): self.rds_enabled = config.getboolean('ec2', 'rds') # Include ElastiCache instances? self.elasticache_enabled = True if config.has_option('ec2', 'elasticache'): self.elasticache_enabled = config.getboolean('ec2', 'elasticache') # Return all EC2 instances? if config.has_option('ec2', 'all_instances'): self.all_instances = config.getboolean('ec2', 'all_instances') else: self.all_instances = False # Instance states to be gathered in inventory. Default is 'running'. # Setting 'all_instances' to 'yes' overrides this option. 
ec2_valid_instance_states = [ 'pending', 'running', 'shutting-down', 'terminated', 'stopping', 'stopped' ] self.ec2_instance_states = [] if self.all_instances: self.ec2_instance_states = ec2_valid_instance_states elif config.has_option('ec2', 'instance_states'): for instance_state in config.get('ec2', 'instance_states').split(','): instance_state = instance_state.strip() if instance_state not in ec2_valid_instance_states: continue self.ec2_instance_states.append(instance_state) else: self.ec2_instance_states = ['running'] # Return all RDS instances? (if RDS is enabled) if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') else: self.all_rds_instances = False # Return all ElastiCache replication groups? # (if ElastiCache is enabled) if (config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled): self.all_elasticache_replication_groups = config.getboolean( 'ec2', 'all_elasticache_replication_groups') else: self.all_elasticache_replication_groups = False # Return all ElastiCache clusters? (if ElastiCache is enabled) if (config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled): self.all_elasticache_clusters = config.getboolean( 'ec2', 'all_elasticache_clusters') else: self.all_elasticache_clusters = False # Return all ElastiCache nodes? 
(if ElastiCache is enabled) if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled: self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes') else: self.all_elasticache_nodes = False # boto configuration profile (prefer CLI argument) if config.has_option('ec2', 'boto_profile') and not self.boto_profile: self.boto_profile = config.get('ec2', 'boto_profile') # Cache related cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) if self.boto_profile: cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile) if not os.path.exists(cache_dir): os.makedirs(cache_dir) self.cache_path_cache = cache_dir + "/ansible-ec2.cache" self.cache_path_index = cache_dir + "/ansible-ec2.index" self.cache_max_age = config.getint('ec2', 'cache_max_age') if config.has_option('ec2', 'expand_csv_tags'): self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags') else: self.expand_csv_tags = False # Configure nested groups instead of flat namespace. if config.has_option('ec2', 'nested_groups'): self.nested_groups = config.getboolean('ec2', 'nested_groups') else: self.nested_groups = False # Replace dash or not in group names if config.has_option('ec2', 'replace_dash_in_groups'): self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups') else: self.replace_dash_in_groups = True # Configure which groups should be created. 
group_by_options = [ 'group_by_instance_id', 'group_by_region', 'group_by_availability_zone', 'group_by_ami_id', 'group_by_instance_type', 'group_by_key_pair', 'group_by_vpc_id', 'group_by_security_group', 'group_by_tag_keys', 'group_by_tag_none', 'group_by_route53_names', 'group_by_rds_engine', 'group_by_rds_parameter_group', 'group_by_elasticache_engine', 'group_by_elasticache_cluster', 'group_by_elasticache_parameter_group', 'group_by_elasticache_replication_group', ] for option in group_by_options: if config.has_option('ec2', option): setattr(self, option, config.getboolean('ec2', option)) else: setattr(self, option, True) # Do we need to just include hosts that match a pattern? try: pattern_include = config.get('ec2', 'pattern_include') if pattern_include and len(pattern_include) > 0: self.pattern_include = re.compile(pattern_include) else: self.pattern_include = None except configparser.NoOptionError: self.pattern_include = None # Do we need to exclude hosts that match a pattern? try: pattern_exclude = config.get('ec2', 'pattern_exclude'); if pattern_exclude and len(pattern_exclude) > 0: self.pattern_exclude = re.compile(pattern_exclude) else: self.pattern_exclude = None except configparser.NoOptionError: self.pattern_exclude = None # Instance filters (see boto and EC2 API docs). Ignore invalid filters. self.ec2_instance_filters = defaultdict(list) if config.has_option('ec2', 'instance_filters'): for instance_filter in config.get('ec2', 'instance_filters', '').split(','): instance_filter = instance_filter.strip() if not instance_filter or '=' not in instance_filter: continue filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] if not filter_key: continue self.ec2_instance_filters[filter_key].append(filter_value)
[ "def", "read_settings", "(", "self", ",", "configfile", ")", ":", "if", "six", ".", "PY3", ":", "config", "=", "configparser", ".", "ConfigParser", "(", ")", "else", ":", "config", "=", "configparser", ".", "SafeConfigParser", "(", ")", "config", ".", "r...
Reads the settings from the ec2.ini file
[ "Reads", "the", "settings", "from", "the", "ec2", ".", "ini", "file" ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L101-L300
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.do_api_calls_update_cache
def do_api_calls_update_cache(self): ''' Do API calls to each region, and save data in cache files ''' if self.route53_enabled: self.get_route53_records() for region in self.regions: self.get_instances_by_region(region) if self.rds_enabled: self.get_rds_instances_by_region(region) if self.elasticache_enabled: self.get_elasticache_clusters_by_region(region) self.get_elasticache_replication_groups_by_region(region) self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.index, self.cache_path_index)
python
def do_api_calls_update_cache(self): ''' Do API calls to each region, and save data in cache files ''' if self.route53_enabled: self.get_route53_records() for region in self.regions: self.get_instances_by_region(region) if self.rds_enabled: self.get_rds_instances_by_region(region) if self.elasticache_enabled: self.get_elasticache_clusters_by_region(region) self.get_elasticache_replication_groups_by_region(region) self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.index, self.cache_path_index)
[ "def", "do_api_calls_update_cache", "(", "self", ")", ":", "if", "self", ".", "route53_enabled", ":", "self", ".", "get_route53_records", "(", ")", "for", "region", "in", "self", ".", "regions", ":", "self", ".", "get_instances_by_region", "(", "region", ")", ...
Do API calls to each region, and save data in cache files
[ "Do", "API", "calls", "to", "each", "region", "and", "save", "data", "in", "cache", "files" ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L302-L317
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.connect
def connect(self, region): ''' create connection to api server''' if self.eucalyptus: conn = boto.connect_euca(host=self.eucalyptus_host) conn.APIVersion = '2010-08-31' else: conn = self.connect_to_aws(ec2, region) return conn
python
def connect(self, region): ''' create connection to api server''' if self.eucalyptus: conn = boto.connect_euca(host=self.eucalyptus_host) conn.APIVersion = '2010-08-31' else: conn = self.connect_to_aws(ec2, region) return conn
[ "def", "connect", "(", "self", ",", "region", ")", ":", "if", "self", ".", "eucalyptus", ":", "conn", "=", "boto", ".", "connect_euca", "(", "host", "=", "self", ".", "eucalyptus_host", ")", "conn", ".", "APIVersion", "=", "'2010-08-31'", "else", ":", ...
create connection to api server
[ "create", "connection", "to", "api", "server" ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L319-L326
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.boto_fix_security_token_in_profile
def boto_fix_security_token_in_profile(self, connect_args): ''' monkey patch for boto issue boto/boto#2100 ''' profile = 'profile ' + self.boto_profile if boto.config.has_option(profile, 'aws_security_token'): connect_args['security_token'] = boto.config.get(profile, 'aws_security_token') return connect_args
python
def boto_fix_security_token_in_profile(self, connect_args): ''' monkey patch for boto issue boto/boto#2100 ''' profile = 'profile ' + self.boto_profile if boto.config.has_option(profile, 'aws_security_token'): connect_args['security_token'] = boto.config.get(profile, 'aws_security_token') return connect_args
[ "def", "boto_fix_security_token_in_profile", "(", "self", ",", "connect_args", ")", ":", "profile", "=", "'profile '", "+", "self", ".", "boto_profile", "if", "boto", ".", "config", ".", "has_option", "(", "profile", ",", "'aws_security_token'", ")", ":", "conne...
monkey patch for boto issue boto/boto#2100
[ "monkey", "patch", "for", "boto", "issue", "boto", "/", "boto#2100" ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L328-L333
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.get_instances_by_region
def get_instances_by_region(self, region): ''' Makes an AWS EC2 API call to the list of instances in a particular region ''' try: conn = self.connect(region) reservations = [] if self.ec2_instance_filters: for filter_key, filter_values in self.ec2_instance_filters.items(): reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values })) else: reservations = conn.get_all_instances() for reservation in reservations: for instance in reservation.instances: self.add_instance(instance, region) except boto.exception.BotoServerError as e: if e.error_code == 'AuthFailure': error = self.get_auth_error_message() else: backend = 'Eucalyptus' if self.eucalyptus else 'AWS' error = "Error connecting to %s backend.\n%s" % (backend, e.message) self.fail_with_error(error, 'getting EC2 instances')
python
def get_instances_by_region(self, region): ''' Makes an AWS EC2 API call to the list of instances in a particular region ''' try: conn = self.connect(region) reservations = [] if self.ec2_instance_filters: for filter_key, filter_values in self.ec2_instance_filters.items(): reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values })) else: reservations = conn.get_all_instances() for reservation in reservations: for instance in reservation.instances: self.add_instance(instance, region) except boto.exception.BotoServerError as e: if e.error_code == 'AuthFailure': error = self.get_auth_error_message() else: backend = 'Eucalyptus' if self.eucalyptus else 'AWS' error = "Error connecting to %s backend.\n%s" % (backend, e.message) self.fail_with_error(error, 'getting EC2 instances')
[ "def", "get_instances_by_region", "(", "self", ",", "region", ")", ":", "try", ":", "conn", "=", "self", ".", "connect", "(", "region", ")", "reservations", "=", "[", "]", "if", "self", ".", "ec2_instance_filters", ":", "for", "filter_key", ",", "filter_va...
Makes an AWS EC2 API call to the list of instances in a particular region
[ "Makes", "an", "AWS", "EC2", "API", "call", "to", "the", "list", "of", "instances", "in", "a", "particular", "region" ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L349-L372
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.get_rds_instances_by_region
def get_rds_instances_by_region(self, region): ''' Makes an AWS API call to the list of RDS instances in a particular region ''' try: conn = self.connect_to_aws(rds, region) if conn: instances = conn.get_all_dbinstances() for instance in instances: self.add_rds_instance(instance, region) except boto.exception.BotoServerError as e: error = e.reason if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if not e.reason == "Forbidden": error = "Looks like AWS RDS is down:\n%s" % e.message self.fail_with_error(error, 'getting RDS instances')
python
def get_rds_instances_by_region(self, region): ''' Makes an AWS API call to the list of RDS instances in a particular region ''' try: conn = self.connect_to_aws(rds, region) if conn: instances = conn.get_all_dbinstances() for instance in instances: self.add_rds_instance(instance, region) except boto.exception.BotoServerError as e: error = e.reason if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if not e.reason == "Forbidden": error = "Looks like AWS RDS is down:\n%s" % e.message self.fail_with_error(error, 'getting RDS instances')
[ "def", "get_rds_instances_by_region", "(", "self", ",", "region", ")", ":", "try", ":", "conn", "=", "self", ".", "connect_to_aws", "(", "rds", ",", "region", ")", "if", "conn", ":", "instances", "=", "conn", ".", "get_all_dbinstances", "(", ")", "for", ...
Makes an AWS API call to the list of RDS instances in a particular region
[ "Makes", "an", "AWS", "API", "call", "to", "the", "list", "of", "RDS", "instances", "in", "a", "particular", "region" ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L374-L391
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.get_elasticache_clusters_by_region
def get_elasticache_clusters_by_region(self, region): ''' Makes an AWS API call to the list of ElastiCache clusters (with nodes' info) in a particular region.''' # ElastiCache boto module doesn't provide a get_all_intances method, # that's why we need to call describe directly (it would be called by # the shorthand method anyway...) try: conn = elasticache.connect_to_region(region) if conn: # show_cache_node_info = True # because we also want nodes' information response = conn.describe_cache_clusters(None, None, None, True) except boto.exception.BotoServerError as e: error = e.reason if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if not e.reason == "Forbidden": error = "Looks like AWS ElastiCache is down:\n%s" % e.message self.fail_with_error(error, 'getting ElastiCache clusters') try: # Boto also doesn't provide wrapper classes to CacheClusters or # CacheNodes. Because of that wo can't make use of the get_list # method in the AWSQueryConnection. Let's do the work manually clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'] except KeyError as e: error = "ElastiCache query to AWS failed (unexpected format)." self.fail_with_error(error, 'getting ElastiCache clusters') for cluster in clusters: self.add_elasticache_cluster(cluster, region)
python
def get_elasticache_clusters_by_region(self, region): ''' Makes an AWS API call to the list of ElastiCache clusters (with nodes' info) in a particular region.''' # ElastiCache boto module doesn't provide a get_all_intances method, # that's why we need to call describe directly (it would be called by # the shorthand method anyway...) try: conn = elasticache.connect_to_region(region) if conn: # show_cache_node_info = True # because we also want nodes' information response = conn.describe_cache_clusters(None, None, None, True) except boto.exception.BotoServerError as e: error = e.reason if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if not e.reason == "Forbidden": error = "Looks like AWS ElastiCache is down:\n%s" % e.message self.fail_with_error(error, 'getting ElastiCache clusters') try: # Boto also doesn't provide wrapper classes to CacheClusters or # CacheNodes. Because of that wo can't make use of the get_list # method in the AWSQueryConnection. Let's do the work manually clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'] except KeyError as e: error = "ElastiCache query to AWS failed (unexpected format)." self.fail_with_error(error, 'getting ElastiCache clusters') for cluster in clusters: self.add_elasticache_cluster(cluster, region)
[ "def", "get_elasticache_clusters_by_region", "(", "self", ",", "region", ")", ":", "# ElastiCache boto module doesn't provide a get_all_intances method,", "# that's why we need to call describe directly (it would be called by", "# the shorthand method anyway...)", "try", ":", "conn", "="...
Makes an AWS API call to the list of ElastiCache clusters (with nodes' info) in a particular region.
[ "Makes", "an", "AWS", "API", "call", "to", "the", "list", "of", "ElastiCache", "clusters", "(", "with", "nodes", "info", ")", "in", "a", "particular", "region", "." ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L393-L427
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.get_elasticache_replication_groups_by_region
def get_elasticache_replication_groups_by_region(self, region): ''' Makes an AWS API call to the list of ElastiCache replication groups in a particular region.''' # ElastiCache boto module doesn't provide a get_all_intances method, # that's why we need to call describe directly (it would be called by # the shorthand method anyway...) try: conn = elasticache.connect_to_region(region) if conn: response = conn.describe_replication_groups() except boto.exception.BotoServerError as e: error = e.reason if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if not e.reason == "Forbidden": error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message self.fail_with_error(error, 'getting ElastiCache clusters') try: # Boto also doesn't provide wrapper classes to ReplicationGroups # Because of that wo can't make use of the get_list method in the # AWSQueryConnection. Let's do the work manually replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups'] except KeyError as e: error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)." self.fail_with_error(error, 'getting ElastiCache clusters') for replication_group in replication_groups: self.add_elasticache_replication_group(replication_group, region)
python
def get_elasticache_replication_groups_by_region(self, region): ''' Makes an AWS API call to the list of ElastiCache replication groups in a particular region.''' # ElastiCache boto module doesn't provide a get_all_intances method, # that's why we need to call describe directly (it would be called by # the shorthand method anyway...) try: conn = elasticache.connect_to_region(region) if conn: response = conn.describe_replication_groups() except boto.exception.BotoServerError as e: error = e.reason if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if not e.reason == "Forbidden": error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message self.fail_with_error(error, 'getting ElastiCache clusters') try: # Boto also doesn't provide wrapper classes to ReplicationGroups # Because of that wo can't make use of the get_list method in the # AWSQueryConnection. Let's do the work manually replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups'] except KeyError as e: error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)." self.fail_with_error(error, 'getting ElastiCache clusters') for replication_group in replication_groups: self.add_elasticache_replication_group(replication_group, region)
[ "def", "get_elasticache_replication_groups_by_region", "(", "self", ",", "region", ")", ":", "# ElastiCache boto module doesn't provide a get_all_intances method,", "# that's why we need to call describe directly (it would be called by", "# the shorthand method anyway...)", "try", ":", "co...
Makes an AWS API call to the list of ElastiCache replication groups in a particular region.
[ "Makes", "an", "AWS", "API", "call", "to", "the", "list", "of", "ElastiCache", "replication", "groups", "in", "a", "particular", "region", "." ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L429-L461
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.get_auth_error_message
def get_auth_error_message(self): ''' create an informative error message if there is an issue authenticating''' errors = ["Authentication error retrieving ec2 inventory."] if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]: errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found') else: errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct') boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials'] boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p))) if len(boto_config_found) > 0: errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found)) else: errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths)) return '\n'.join(errors)
python
def get_auth_error_message(self): ''' create an informative error message if there is an issue authenticating''' errors = ["Authentication error retrieving ec2 inventory."] if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]: errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found') else: errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct') boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials'] boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p))) if len(boto_config_found) > 0: errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found)) else: errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths)) return '\n'.join(errors)
[ "def", "get_auth_error_message", "(", "self", ")", ":", "errors", "=", "[", "\"Authentication error retrieving ec2 inventory.\"", "]", "if", "None", "in", "[", "os", ".", "environ", ".", "get", "(", "'AWS_ACCESS_KEY_ID'", ")", ",", "os", ".", "environ", ".", "...
create an informative error message if there is an issue authenticating
[ "create", "an", "informative", "error", "message", "if", "there", "is", "an", "issue", "authenticating" ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L463-L478
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.fail_with_error
def fail_with_error(self, err_msg, err_operation=None): '''log an error to std err for ansible-playbook to consume and exit''' if err_operation: err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( err_msg=err_msg, err_operation=err_operation) sys.stderr.write(err_msg) sys.exit(1)
python
def fail_with_error(self, err_msg, err_operation=None): '''log an error to std err for ansible-playbook to consume and exit''' if err_operation: err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( err_msg=err_msg, err_operation=err_operation) sys.stderr.write(err_msg) sys.exit(1)
[ "def", "fail_with_error", "(", "self", ",", "err_msg", ",", "err_operation", "=", "None", ")", ":", "if", "err_operation", ":", "err_msg", "=", "'ERROR: \"{err_msg}\", while: {err_operation}'", ".", "format", "(", "err_msg", "=", "err_msg", ",", "err_operation", "...
log an error to std err for ansible-playbook to consume and exit
[ "log", "an", "error", "to", "std", "err", "for", "ansible", "-", "playbook", "to", "consume", "and", "exit" ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L480-L486
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.add_instance
def add_instance(self, instance, region): ''' Adds an instance to the inventory and index, as long as it is addressable ''' # Only return instances with desired instance states if instance.state not in self.ec2_instance_states: return # Select the best destination address if instance.subnet_id: dest = getattr(instance, self.vpc_destination_variable, None) if dest is None: dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) else: dest = getattr(instance, self.destination_variable, None) if dest is None: dest = getattr(instance, 'tags').get(self.destination_variable, None) if not dest: # Skip instances we cannot address (e.g. private VPC subnet) return # if we only want to include hosts that match a pattern, skip those that don't if self.pattern_include and not self.pattern_include.match(dest): return # if we need to exclude hosts that match a pattern, skip those if self.pattern_exclude and self.pattern_exclude.match(dest): return # Add to index self.index[dest] = [region, instance.id] # Inventory: Group by instance ID (always a group of 1) if self.group_by_instance_id: self.inventory[instance.id] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', instance.id) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone: self.push(self.inventory, instance.placement, dest) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, instance.placement) self.push_group(self.inventory, 'zones', instance.placement) # Inventory: Group by Amazon Machine Image (AMI) ID if self.group_by_ami_id: ami_id = self.to_safe(instance.image_id) self.push(self.inventory, ami_id, dest) if self.nested_groups: self.push_group(self.inventory, 'images', ami_id) # Inventory: Group by instance type if self.group_by_instance_type: type_name 
= self.to_safe('type_' + instance.instance_type) self.push(self.inventory, type_name, dest) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by key pair if self.group_by_key_pair and instance.key_name: key_name = self.to_safe('key_' + instance.key_name) self.push(self.inventory, key_name, dest) if self.nested_groups: self.push_group(self.inventory, 'keys', key_name) # Inventory: Group by VPC if self.group_by_vpc_id and instance.vpc_id: vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) self.push(self.inventory, vpc_id_name, dest) if self.nested_groups: self.push_group(self.inventory, 'vpcs', vpc_id_name) # Inventory: Group by security group if self.group_by_security_group: try: for group in instance.groups: key = self.to_safe("security_group_" + group.name) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) except AttributeError: self.fail_with_error('\n'.join(['Package boto seems a bit older.', 'Please upgrade boto >= 2.3.0.'])) # Inventory: Group by tag keys if self.group_by_tag_keys: for k, v in instance.tags.items(): if self.expand_csv_tags and v and ',' in v: values = map(lambda x: x.strip(), v.split(',')) else: values = [v] for v in values: if v: key = self.to_safe("tag_" + k + "=" + v) else: key = self.to_safe("tag_" + k) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) if v: self.push_group(self.inventory, self.to_safe("tag_" + k), key) # Inventory: Group by Route53 domain names if enabled if self.route53_enabled and self.group_by_route53_names: route53_names = self.get_instance_route53_names(instance) for name in route53_names: self.push(self.inventory, name, dest) if self.nested_groups: self.push_group(self.inventory, 'route53', name) # Global Tag: instances without tags if self.group_by_tag_none and len(instance.tags) == 0: self.push(self.inventory, 'tag_none', 
dest) if self.nested_groups: self.push_group(self.inventory, 'tags', 'tag_none') # Global Tag: tag all EC2 instances self.push(self.inventory, 'ec2', dest) self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
python
def add_instance(self, instance, region): ''' Adds an instance to the inventory and index, as long as it is addressable ''' # Only return instances with desired instance states if instance.state not in self.ec2_instance_states: return # Select the best destination address if instance.subnet_id: dest = getattr(instance, self.vpc_destination_variable, None) if dest is None: dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) else: dest = getattr(instance, self.destination_variable, None) if dest is None: dest = getattr(instance, 'tags').get(self.destination_variable, None) if not dest: # Skip instances we cannot address (e.g. private VPC subnet) return # if we only want to include hosts that match a pattern, skip those that don't if self.pattern_include and not self.pattern_include.match(dest): return # if we need to exclude hosts that match a pattern, skip those if self.pattern_exclude and self.pattern_exclude.match(dest): return # Add to index self.index[dest] = [region, instance.id] # Inventory: Group by instance ID (always a group of 1) if self.group_by_instance_id: self.inventory[instance.id] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', instance.id) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone: self.push(self.inventory, instance.placement, dest) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, instance.placement) self.push_group(self.inventory, 'zones', instance.placement) # Inventory: Group by Amazon Machine Image (AMI) ID if self.group_by_ami_id: ami_id = self.to_safe(instance.image_id) self.push(self.inventory, ami_id, dest) if self.nested_groups: self.push_group(self.inventory, 'images', ami_id) # Inventory: Group by instance type if self.group_by_instance_type: type_name 
= self.to_safe('type_' + instance.instance_type) self.push(self.inventory, type_name, dest) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by key pair if self.group_by_key_pair and instance.key_name: key_name = self.to_safe('key_' + instance.key_name) self.push(self.inventory, key_name, dest) if self.nested_groups: self.push_group(self.inventory, 'keys', key_name) # Inventory: Group by VPC if self.group_by_vpc_id and instance.vpc_id: vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) self.push(self.inventory, vpc_id_name, dest) if self.nested_groups: self.push_group(self.inventory, 'vpcs', vpc_id_name) # Inventory: Group by security group if self.group_by_security_group: try: for group in instance.groups: key = self.to_safe("security_group_" + group.name) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) except AttributeError: self.fail_with_error('\n'.join(['Package boto seems a bit older.', 'Please upgrade boto >= 2.3.0.'])) # Inventory: Group by tag keys if self.group_by_tag_keys: for k, v in instance.tags.items(): if self.expand_csv_tags and v and ',' in v: values = map(lambda x: x.strip(), v.split(',')) else: values = [v] for v in values: if v: key = self.to_safe("tag_" + k + "=" + v) else: key = self.to_safe("tag_" + k) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) if v: self.push_group(self.inventory, self.to_safe("tag_" + k), key) # Inventory: Group by Route53 domain names if enabled if self.route53_enabled and self.group_by_route53_names: route53_names = self.get_instance_route53_names(instance) for name in route53_names: self.push(self.inventory, name, dest) if self.nested_groups: self.push_group(self.inventory, 'route53', name) # Global Tag: instances without tags if self.group_by_tag_none and len(instance.tags) == 0: self.push(self.inventory, 'tag_none', 
dest) if self.nested_groups: self.push_group(self.inventory, 'tags', 'tag_none') # Global Tag: tag all EC2 instances self.push(self.inventory, 'ec2', dest) self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
[ "def", "add_instance", "(", "self", ",", "instance", ",", "region", ")", ":", "# Only return instances with desired instance states", "if", "instance", ".", "state", "not", "in", "self", ".", "ec2_instance_states", ":", "return", "# Select the best destination address", ...
Adds an instance to the inventory and index, as long as it is addressable
[ "Adds", "an", "instance", "to", "the", "inventory", "and", "index", "as", "long", "as", "it", "is", "addressable" ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L496-L625
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.add_rds_instance
def add_rds_instance(self, instance, region): ''' Adds an RDS instance to the inventory and index, as long as it is addressable ''' # Only want available instances unless all_rds_instances is True if not self.all_rds_instances and instance.status != 'available': return # Select the best destination address dest = instance.endpoint[0] if not dest: # Skip instances we cannot address (e.g. private VPC subnet) return # Add to index self.index[dest] = [region, instance.id] # Inventory: Group by instance ID (always a group of 1) if self.group_by_instance_id: self.inventory[instance.id] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', instance.id) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone: self.push(self.inventory, instance.availability_zone, dest) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, instance.availability_zone) self.push_group(self.inventory, 'zones', instance.availability_zone) # Inventory: Group by instance type if self.group_by_instance_type: type_name = self.to_safe('type_' + instance.instance_class) self.push(self.inventory, type_name, dest) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by VPC if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) self.push(self.inventory, vpc_id_name, dest) if self.nested_groups: self.push_group(self.inventory, 'vpcs', vpc_id_name) # Inventory: Group by security group if self.group_by_security_group: try: if instance.security_group: key = self.to_safe("security_group_" + instance.security_group.name) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) 
except AttributeError: self.fail_with_error('\n'.join(['Package boto seems a bit older.', 'Please upgrade boto >= 2.3.0.'])) # Inventory: Group by engine if self.group_by_rds_engine: self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest) if self.nested_groups: self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) # Inventory: Group by parameter group if self.group_by_rds_parameter_group: self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest) if self.nested_groups: self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) # Global Tag: all RDS instances self.push(self.inventory, 'rds', dest) self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
python
def add_rds_instance(self, instance, region): ''' Adds an RDS instance to the inventory and index, as long as it is addressable ''' # Only want available instances unless all_rds_instances is True if not self.all_rds_instances and instance.status != 'available': return # Select the best destination address dest = instance.endpoint[0] if not dest: # Skip instances we cannot address (e.g. private VPC subnet) return # Add to index self.index[dest] = [region, instance.id] # Inventory: Group by instance ID (always a group of 1) if self.group_by_instance_id: self.inventory[instance.id] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', instance.id) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone: self.push(self.inventory, instance.availability_zone, dest) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, instance.availability_zone) self.push_group(self.inventory, 'zones', instance.availability_zone) # Inventory: Group by instance type if self.group_by_instance_type: type_name = self.to_safe('type_' + instance.instance_class) self.push(self.inventory, type_name, dest) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by VPC if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) self.push(self.inventory, vpc_id_name, dest) if self.nested_groups: self.push_group(self.inventory, 'vpcs', vpc_id_name) # Inventory: Group by security group if self.group_by_security_group: try: if instance.security_group: key = self.to_safe("security_group_" + instance.security_group.name) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) 
except AttributeError: self.fail_with_error('\n'.join(['Package boto seems a bit older.', 'Please upgrade boto >= 2.3.0.'])) # Inventory: Group by engine if self.group_by_rds_engine: self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest) if self.nested_groups: self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) # Inventory: Group by parameter group if self.group_by_rds_parameter_group: self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest) if self.nested_groups: self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) # Global Tag: all RDS instances self.push(self.inventory, 'rds', dest) self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
[ "def", "add_rds_instance", "(", "self", ",", "instance", ",", "region", ")", ":", "# Only want available instances unless all_rds_instances is True", "if", "not", "self", ".", "all_rds_instances", "and", "instance", ".", "status", "!=", "'available'", ":", "return", "...
Adds an RDS instance to the inventory and index, as long as it is addressable
[ "Adds", "an", "RDS", "instance", "to", "the", "inventory", "and", "index", "as", "long", "as", "it", "is", "addressable" ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L628-L709
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.add_elasticache_cluster
def add_elasticache_cluster(self, cluster, region): ''' Adds an ElastiCache cluster to the inventory and index, as long as it's nodes are addressable ''' # Only want available clusters unless all_elasticache_clusters is True if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available': return # Select the best destination address if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']: # Memcached cluster dest = cluster['ConfigurationEndpoint']['Address'] is_redis = False else: # Redis sigle node cluster # Because all Redis clusters are single nodes, we'll merge the # info from the cluster with info about the node dest = cluster['CacheNodes'][0]['Endpoint']['Address'] is_redis = True if not dest: # Skip clusters we cannot address (e.g. private VPC subnet) return # Add to index self.index[dest] = [region, cluster['CacheClusterId']] # Inventory: Group by instance ID (always a group of 1) if self.group_by_instance_id: self.inventory[cluster['CacheClusterId']] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', cluster['CacheClusterId']) # Inventory: Group by region if self.group_by_region and not is_redis: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone and not is_redis: self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) # Inventory: Group by node type if self.group_by_instance_type and not is_redis: type_name = self.to_safe('type_' + cluster['CacheNodeType']) self.push(self.inventory, type_name, dest) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by VPC (information not available in the current 
# AWS API version for ElastiCache) # Inventory: Group by security group if self.group_by_security_group and not is_redis: # Check for the existence of the 'SecurityGroups' key and also if # this key has some value. When the cluster is not placed in a SG # the query can return None here and cause an error. if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: for security_group in cluster['SecurityGroups']: key = self.to_safe("security_group_" + security_group['SecurityGroupId']) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) # Inventory: Group by engine if self.group_by_elasticache_engine and not is_redis: self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine'])) # Inventory: Group by parameter group if self.group_by_elasticache_parameter_group: self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName'])) # Inventory: Group by replication group if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']: self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId'])) # Global Tag: all ElastiCache clusters self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId']) host_info = self.get_host_info_dict_from_describe_dict(cluster) self.inventory["_meta"]["hostvars"][dest] = host_info # Add the nodes for node in cluster['CacheNodes']: self.add_elasticache_node(node, 
cluster, region)
python
def add_elasticache_cluster(self, cluster, region): ''' Adds an ElastiCache cluster to the inventory and index, as long as it's nodes are addressable ''' # Only want available clusters unless all_elasticache_clusters is True if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available': return # Select the best destination address if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']: # Memcached cluster dest = cluster['ConfigurationEndpoint']['Address'] is_redis = False else: # Redis sigle node cluster # Because all Redis clusters are single nodes, we'll merge the # info from the cluster with info about the node dest = cluster['CacheNodes'][0]['Endpoint']['Address'] is_redis = True if not dest: # Skip clusters we cannot address (e.g. private VPC subnet) return # Add to index self.index[dest] = [region, cluster['CacheClusterId']] # Inventory: Group by instance ID (always a group of 1) if self.group_by_instance_id: self.inventory[cluster['CacheClusterId']] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', cluster['CacheClusterId']) # Inventory: Group by region if self.group_by_region and not is_redis: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone and not is_redis: self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) # Inventory: Group by node type if self.group_by_instance_type and not is_redis: type_name = self.to_safe('type_' + cluster['CacheNodeType']) self.push(self.inventory, type_name, dest) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by VPC (information not available in the current 
# AWS API version for ElastiCache) # Inventory: Group by security group if self.group_by_security_group and not is_redis: # Check for the existence of the 'SecurityGroups' key and also if # this key has some value. When the cluster is not placed in a SG # the query can return None here and cause an error. if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: for security_group in cluster['SecurityGroups']: key = self.to_safe("security_group_" + security_group['SecurityGroupId']) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) # Inventory: Group by engine if self.group_by_elasticache_engine and not is_redis: self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine'])) # Inventory: Group by parameter group if self.group_by_elasticache_parameter_group: self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName'])) # Inventory: Group by replication group if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']: self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId'])) # Global Tag: all ElastiCache clusters self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId']) host_info = self.get_host_info_dict_from_describe_dict(cluster) self.inventory["_meta"]["hostvars"][dest] = host_info # Add the nodes for node in cluster['CacheNodes']: self.add_elasticache_node(node, 
cluster, region)
[ "def", "add_elasticache_cluster", "(", "self", ",", "cluster", ",", "region", ")", ":", "# Only want available clusters unless all_elasticache_clusters is True", "if", "not", "self", ".", "all_elasticache_clusters", "and", "cluster", "[", "'CacheClusterStatus'", "]", "!=", ...
Adds an ElastiCache cluster to the inventory and index, as long as it's nodes are addressable
[ "Adds", "an", "ElastiCache", "cluster", "to", "the", "inventory", "and", "index", "as", "long", "as", "it", "s", "nodes", "are", "addressable" ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L711-L808
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.add_elasticache_node
def add_elasticache_node(self, node, cluster, region): ''' Adds an ElastiCache node to the inventory and index, as long as it is addressable ''' # Only want available nodes unless all_elasticache_nodes is True if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available': return # Select the best destination address dest = node['Endpoint']['Address'] if not dest: # Skip nodes we cannot address (e.g. private VPC subnet) return node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId']) # Add to index self.index[dest] = [region, node_id] # Inventory: Group by node ID (always a group of 1) if self.group_by_instance_id: self.inventory[node_id] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', node_id) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone: self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) # Inventory: Group by node type if self.group_by_instance_type: type_name = self.to_safe('type_' + cluster['CacheNodeType']) self.push(self.inventory, type_name, dest) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by VPC (information not available in the current # AWS API version for ElastiCache) # Inventory: Group by security group if self.group_by_security_group: # Check for the existence of the 'SecurityGroups' key and also if # this key has some value. When the cluster is not placed in a SG # the query can return None here and cause an error. 
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: for security_group in cluster['SecurityGroups']: key = self.to_safe("security_group_" + security_group['SecurityGroupId']) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) # Inventory: Group by engine if self.group_by_elasticache_engine: self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine'])) # Inventory: Group by parameter group (done at cluster level) # Inventory: Group by replication group (done at cluster level) # Inventory: Group by ElastiCache Cluster if self.group_by_elasticache_cluster: self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest) # Global Tag: all ElastiCache nodes self.push(self.inventory, 'elasticache_nodes', dest) host_info = self.get_host_info_dict_from_describe_dict(node) if dest in self.inventory["_meta"]["hostvars"]: self.inventory["_meta"]["hostvars"][dest].update(host_info) else: self.inventory["_meta"]["hostvars"][dest] = host_info
python
def add_elasticache_node(self, node, cluster, region): ''' Adds an ElastiCache node to the inventory and index, as long as it is addressable ''' # Only want available nodes unless all_elasticache_nodes is True if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available': return # Select the best destination address dest = node['Endpoint']['Address'] if not dest: # Skip nodes we cannot address (e.g. private VPC subnet) return node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId']) # Add to index self.index[dest] = [region, node_id] # Inventory: Group by node ID (always a group of 1) if self.group_by_instance_id: self.inventory[node_id] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', node_id) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone: self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) # Inventory: Group by node type if self.group_by_instance_type: type_name = self.to_safe('type_' + cluster['CacheNodeType']) self.push(self.inventory, type_name, dest) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by VPC (information not available in the current # AWS API version for ElastiCache) # Inventory: Group by security group if self.group_by_security_group: # Check for the existence of the 'SecurityGroups' key and also if # this key has some value. When the cluster is not placed in a SG # the query can return None here and cause an error. 
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: for security_group in cluster['SecurityGroups']: key = self.to_safe("security_group_" + security_group['SecurityGroupId']) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) # Inventory: Group by engine if self.group_by_elasticache_engine: self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine'])) # Inventory: Group by parameter group (done at cluster level) # Inventory: Group by replication group (done at cluster level) # Inventory: Group by ElastiCache Cluster if self.group_by_elasticache_cluster: self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest) # Global Tag: all ElastiCache nodes self.push(self.inventory, 'elasticache_nodes', dest) host_info = self.get_host_info_dict_from_describe_dict(node) if dest in self.inventory["_meta"]["hostvars"]: self.inventory["_meta"]["hostvars"][dest].update(host_info) else: self.inventory["_meta"]["hostvars"][dest] = host_info
[ "def", "add_elasticache_node", "(", "self", ",", "node", ",", "cluster", ",", "region", ")", ":", "# Only want available nodes unless all_elasticache_nodes is True", "if", "not", "self", ".", "all_elasticache_nodes", "and", "node", "[", "'CacheNodeStatus'", "]", "!=", ...
Adds an ElastiCache node to the inventory and index, as long as it is addressable
[ "Adds", "an", "ElastiCache", "node", "to", "the", "inventory", "and", "index", "as", "long", "as", "it", "is", "addressable" ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L810-L895
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.add_elasticache_replication_group
def add_elasticache_replication_group(self, replication_group, region): ''' Adds an ElastiCache replication group to the inventory and index ''' # Only want available clusters unless all_elasticache_replication_groups is True if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available': return # Select the best destination address (PrimaryEndpoint) dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] if not dest: # Skip clusters we cannot address (e.g. private VPC subnet) return # Add to index self.index[dest] = [region, replication_group['ReplicationGroupId']] # Inventory: Group by ID (always a group of 1) if self.group_by_instance_id: self.inventory[replication_group['ReplicationGroupId']] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId']) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone (doesn't apply to replication groups) # Inventory: Group by node type (doesn't apply to replication groups) # Inventory: Group by VPC (information not available in the current # AWS API version for replication groups # Inventory: Group by security group (doesn't apply to replication groups) # Check this value in cluster level # Inventory: Group by engine (replication groups are always Redis) if self.group_by_elasticache_engine: self.push(self.inventory, 'elasticache_redis', dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_engines', 'redis') # Global Tag: all ElastiCache clusters self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId']) host_info = self.get_host_info_dict_from_describe_dict(replication_group) self.inventory["_meta"]["hostvars"][dest] = host_info
python
def add_elasticache_replication_group(self, replication_group, region): ''' Adds an ElastiCache replication group to the inventory and index ''' # Only want available clusters unless all_elasticache_replication_groups is True if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available': return # Select the best destination address (PrimaryEndpoint) dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] if not dest: # Skip clusters we cannot address (e.g. private VPC subnet) return # Add to index self.index[dest] = [region, replication_group['ReplicationGroupId']] # Inventory: Group by ID (always a group of 1) if self.group_by_instance_id: self.inventory[replication_group['ReplicationGroupId']] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId']) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone (doesn't apply to replication groups) # Inventory: Group by node type (doesn't apply to replication groups) # Inventory: Group by VPC (information not available in the current # AWS API version for replication groups # Inventory: Group by security group (doesn't apply to replication groups) # Check this value in cluster level # Inventory: Group by engine (replication groups are always Redis) if self.group_by_elasticache_engine: self.push(self.inventory, 'elasticache_redis', dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_engines', 'redis') # Global Tag: all ElastiCache clusters self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId']) host_info = self.get_host_info_dict_from_describe_dict(replication_group) self.inventory["_meta"]["hostvars"][dest] = host_info
[ "def", "add_elasticache_replication_group", "(", "self", ",", "replication_group", ",", "region", ")", ":", "# Only want available clusters unless all_elasticache_replication_groups is True", "if", "not", "self", ".", "all_elasticache_replication_groups", "and", "replication_group"...
Adds an ElastiCache replication group to the inventory and index
[ "Adds", "an", "ElastiCache", "replication", "group", "to", "the", "inventory", "and", "index" ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L897-L947
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.get_route53_records
def get_route53_records(self): ''' Get and store the map of resource records to domain names that point to them. ''' r53_conn = route53.Route53Connection() all_zones = r53_conn.get_zones() route53_zones = [ zone for zone in all_zones if zone.name[:-1] not in self.route53_excluded_zones ] self.route53_records = {} for zone in route53_zones: rrsets = r53_conn.get_all_rrsets(zone.id) for record_set in rrsets: record_name = record_set.name if record_name.endswith('.'): record_name = record_name[:-1] for resource in record_set.resource_records: self.route53_records.setdefault(resource, set()) self.route53_records[resource].add(record_name)
python
def get_route53_records(self): ''' Get and store the map of resource records to domain names that point to them. ''' r53_conn = route53.Route53Connection() all_zones = r53_conn.get_zones() route53_zones = [ zone for zone in all_zones if zone.name[:-1] not in self.route53_excluded_zones ] self.route53_records = {} for zone in route53_zones: rrsets = r53_conn.get_all_rrsets(zone.id) for record_set in rrsets: record_name = record_set.name if record_name.endswith('.'): record_name = record_name[:-1] for resource in record_set.resource_records: self.route53_records.setdefault(resource, set()) self.route53_records[resource].add(record_name)
[ "def", "get_route53_records", "(", "self", ")", ":", "r53_conn", "=", "route53", ".", "Route53Connection", "(", ")", "all_zones", "=", "r53_conn", ".", "get_zones", "(", ")", "route53_zones", "=", "[", "zone", "for", "zone", "in", "all_zones", "if", "zone", ...
Get and store the map of resource records to domain names that point to them.
[ "Get", "and", "store", "the", "map", "of", "resource", "records", "to", "domain", "names", "that", "point", "to", "them", "." ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L949-L972
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.get_instance_route53_names
def get_instance_route53_names(self, instance): ''' Check if an instance is referenced in the records we have from Route53. If it is, return the list of domain names pointing to said instance. If nothing points to it, return an empty list. ''' instance_attributes = [ 'public_dns_name', 'private_dns_name', 'ip_address', 'private_ip_address' ] name_list = set() for attrib in instance_attributes: try: value = getattr(instance, attrib) except AttributeError: continue if value in self.route53_records: name_list.update(self.route53_records[value]) return list(name_list)
python
def get_instance_route53_names(self, instance): ''' Check if an instance is referenced in the records we have from Route53. If it is, return the list of domain names pointing to said instance. If nothing points to it, return an empty list. ''' instance_attributes = [ 'public_dns_name', 'private_dns_name', 'ip_address', 'private_ip_address' ] name_list = set() for attrib in instance_attributes: try: value = getattr(instance, attrib) except AttributeError: continue if value in self.route53_records: name_list.update(self.route53_records[value]) return list(name_list)
[ "def", "get_instance_route53_names", "(", "self", ",", "instance", ")", ":", "instance_attributes", "=", "[", "'public_dns_name'", ",", "'private_dns_name'", ",", "'ip_address'", ",", "'private_ip_address'", "]", "name_list", "=", "set", "(", ")", "for", "attrib", ...
Check if an instance is referenced in the records we have from Route53. If it is, return the list of domain names pointing to said instance. If nothing points to it, return an empty list.
[ "Check", "if", "an", "instance", "is", "referenced", "in", "the", "records", "we", "have", "from", "Route53", ".", "If", "it", "is", "return", "the", "list", "of", "domain", "names", "pointing", "to", "said", "instance", ".", "If", "nothing", "points", "...
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L975-L994
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.get_host_info_dict_from_describe_dict
def get_host_info_dict_from_describe_dict(self, describe_dict): ''' Parses the dictionary returned by the API call into a flat list of parameters. This method should be used only when 'describe' is used directly because Boto doesn't provide specific classes. ''' # I really don't agree with prefixing everything with 'ec2' # because EC2, RDS and ElastiCache are different services. # I'm just following the pattern used until now to not break any # compatibility. host_info = {} for key in describe_dict: value = describe_dict[key] key = self.to_safe('ec2_' + self.uncammelize(key)) # Handle complex types # Target: Memcached Cache Clusters if key == 'ec2_configuration_endpoint' and value: host_info['ec2_configuration_endpoint_address'] = value['Address'] host_info['ec2_configuration_endpoint_port'] = value['Port'] # Target: Cache Nodes and Redis Cache Clusters (single node) if key == 'ec2_endpoint' and value: host_info['ec2_endpoint_address'] = value['Address'] host_info['ec2_endpoint_port'] = value['Port'] # Target: Redis Replication Groups if key == 'ec2_node_groups' and value: host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address'] host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port'] replica_count = 0 for node in value[0]['NodeGroupMembers']: if node['CurrentRole'] == 'primary': host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address'] host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port'] host_info['ec2_primary_cluster_id'] = node['CacheClusterId'] elif node['CurrentRole'] == 'replica': host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address'] host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port'] host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId'] replica_count += 1 # Target: Redis Replication Groups if key == 'ec2_member_clusters' and value: host_info['ec2_member_clusters'] = ','.join([str(i) for i 
in value]) # Target: All Cache Clusters elif key == 'ec2_cache_parameter_group': host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']]) host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] # Target: Almost everything elif key == 'ec2_security_groups': # Skip if SecurityGroups is None # (it is possible to have the key defined but no value in it). if value is not None: sg_ids = [] for sg in value: sg_ids.append(sg['SecurityGroupId']) host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids]) # Target: Everything # Preserve booleans and integers elif type(value) in [int, bool]: host_info[key] = value # Target: Everything # Sanitize string values elif isinstance(value, six.string_types): host_info[key] = value.strip() # Target: Everything # Replace None by an empty string elif type(value) == type(None): host_info[key] = '' else: # Remove non-processed complex types pass return host_info
python
def get_host_info_dict_from_describe_dict(self, describe_dict): ''' Parses the dictionary returned by the API call into a flat list of parameters. This method should be used only when 'describe' is used directly because Boto doesn't provide specific classes. ''' # I really don't agree with prefixing everything with 'ec2' # because EC2, RDS and ElastiCache are different services. # I'm just following the pattern used until now to not break any # compatibility. host_info = {} for key in describe_dict: value = describe_dict[key] key = self.to_safe('ec2_' + self.uncammelize(key)) # Handle complex types # Target: Memcached Cache Clusters if key == 'ec2_configuration_endpoint' and value: host_info['ec2_configuration_endpoint_address'] = value['Address'] host_info['ec2_configuration_endpoint_port'] = value['Port'] # Target: Cache Nodes and Redis Cache Clusters (single node) if key == 'ec2_endpoint' and value: host_info['ec2_endpoint_address'] = value['Address'] host_info['ec2_endpoint_port'] = value['Port'] # Target: Redis Replication Groups if key == 'ec2_node_groups' and value: host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address'] host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port'] replica_count = 0 for node in value[0]['NodeGroupMembers']: if node['CurrentRole'] == 'primary': host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address'] host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port'] host_info['ec2_primary_cluster_id'] = node['CacheClusterId'] elif node['CurrentRole'] == 'replica': host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address'] host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port'] host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId'] replica_count += 1 # Target: Redis Replication Groups if key == 'ec2_member_clusters' and value: host_info['ec2_member_clusters'] = ','.join([str(i) for i 
in value]) # Target: All Cache Clusters elif key == 'ec2_cache_parameter_group': host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']]) host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] # Target: Almost everything elif key == 'ec2_security_groups': # Skip if SecurityGroups is None # (it is possible to have the key defined but no value in it). if value is not None: sg_ids = [] for sg in value: sg_ids.append(sg['SecurityGroupId']) host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids]) # Target: Everything # Preserve booleans and integers elif type(value) in [int, bool]: host_info[key] = value # Target: Everything # Sanitize string values elif isinstance(value, six.string_types): host_info[key] = value.strip() # Target: Everything # Replace None by an empty string elif type(value) == type(None): host_info[key] = '' else: # Remove non-processed complex types pass return host_info
[ "def", "get_host_info_dict_from_describe_dict", "(", "self", ",", "describe_dict", ")", ":", "# I really don't agree with prefixing everything with 'ec2'", "# because EC2, RDS and ElastiCache are different services.", "# I'm just following the pattern used until now to not break any", "# compa...
Parses the dictionary returned by the API call into a flat list of parameters. This method should be used only when 'describe' is used directly because Boto doesn't provide specific classes.
[ "Parses", "the", "dictionary", "returned", "by", "the", "API", "call", "into", "a", "flat", "list", "of", "parameters", ".", "This", "method", "should", "be", "used", "only", "when", "describe", "is", "used", "directly", "because", "Boto", "doesn", "t", "p...
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L1043-L1126
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.get_host
def get_host(self, host): ''' Get variables about a specific host ''' if len(self.index) == 0: # Need to load index from cache self.load_index_from_cache() if not host in self.index: # try updating the cache self.do_api_calls_update_cache() if not host in self.index: # host might not exist anymore return {} (region, instance_id) = self.index[host] instance = self.get_instance(region, instance_id) return self.get_host_info_dict_from_instance(instance)
python
def get_host(self, host): ''' Get variables about a specific host ''' if len(self.index) == 0: # Need to load index from cache self.load_index_from_cache() if not host in self.index: # try updating the cache self.do_api_calls_update_cache() if not host in self.index: # host might not exist anymore return {} (region, instance_id) = self.index[host] instance = self.get_instance(region, instance_id) return self.get_host_info_dict_from_instance(instance)
[ "def", "get_host", "(", "self", ",", "host", ")", ":", "if", "len", "(", "self", ".", "index", ")", "==", "0", ":", "# Need to load index from cache", "self", ".", "load_index_from_cache", "(", ")", "if", "not", "host", "in", "self", ".", "index", ":", ...
Get variables about a specific host
[ "Get", "variables", "about", "a", "specific", "host" ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L1128-L1145
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.push
def push(self, my_dict, key, element): ''' Push an element onto an array that may not have been defined in the dict ''' group_info = my_dict.setdefault(key, []) if isinstance(group_info, dict): host_list = group_info.setdefault('hosts', []) host_list.append(element) else: group_info.append(element)
python
def push(self, my_dict, key, element): ''' Push an element onto an array that may not have been defined in the dict ''' group_info = my_dict.setdefault(key, []) if isinstance(group_info, dict): host_list = group_info.setdefault('hosts', []) host_list.append(element) else: group_info.append(element)
[ "def", "push", "(", "self", ",", "my_dict", ",", "key", ",", "element", ")", ":", "group_info", "=", "my_dict", ".", "setdefault", "(", "key", ",", "[", "]", ")", "if", "isinstance", "(", "group_info", ",", "dict", ")", ":", "host_list", "=", "group_...
Push an element onto an array that may not have been defined in the dict
[ "Push", "an", "element", "onto", "an", "array", "that", "may", "not", "have", "been", "defined", "in", "the", "dict" ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L1147-L1155
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.push_group
def push_group(self, my_dict, key, element): ''' Push a group as a child of another group. ''' parent_group = my_dict.setdefault(key, {}) if not isinstance(parent_group, dict): parent_group = my_dict[key] = {'hosts': parent_group} child_groups = parent_group.setdefault('children', []) if element not in child_groups: child_groups.append(element)
python
def push_group(self, my_dict, key, element): ''' Push a group as a child of another group. ''' parent_group = my_dict.setdefault(key, {}) if not isinstance(parent_group, dict): parent_group = my_dict[key] = {'hosts': parent_group} child_groups = parent_group.setdefault('children', []) if element not in child_groups: child_groups.append(element)
[ "def", "push_group", "(", "self", ",", "my_dict", ",", "key", ",", "element", ")", ":", "parent_group", "=", "my_dict", ".", "setdefault", "(", "key", ",", "{", "}", ")", "if", "not", "isinstance", "(", "parent_group", ",", "dict", ")", ":", "parent_gr...
Push a group as a child of another group.
[ "Push", "a", "group", "as", "a", "child", "of", "another", "group", "." ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L1157-L1164
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.load_inventory_from_cache
def load_inventory_from_cache(self): ''' Reads the inventory from the cache file and returns it as a JSON object ''' cache = open(self.cache_path_cache, 'r') json_inventory = cache.read() self.inventory = json.loads(json_inventory)
python
def load_inventory_from_cache(self): ''' Reads the inventory from the cache file and returns it as a JSON object ''' cache = open(self.cache_path_cache, 'r') json_inventory = cache.read() self.inventory = json.loads(json_inventory)
[ "def", "load_inventory_from_cache", "(", "self", ")", ":", "cache", "=", "open", "(", "self", ".", "cache_path_cache", ",", "'r'", ")", "json_inventory", "=", "cache", ".", "read", "(", ")", "self", ".", "inventory", "=", "json", ".", "loads", "(", "json...
Reads the inventory from the cache file and returns it as a JSON object
[ "Reads", "the", "inventory", "from", "the", "cache", "file", "and", "returns", "it", "as", "a", "JSON", "object" ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L1166-L1172
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.load_index_from_cache
def load_index_from_cache(self): ''' Reads the index from the cache file sets self.index ''' cache = open(self.cache_path_index, 'r') json_index = cache.read() self.index = json.loads(json_index)
python
def load_index_from_cache(self): ''' Reads the index from the cache file sets self.index ''' cache = open(self.cache_path_index, 'r') json_index = cache.read() self.index = json.loads(json_index)
[ "def", "load_index_from_cache", "(", "self", ")", ":", "cache", "=", "open", "(", "self", ".", "cache_path_index", ",", "'r'", ")", "json_index", "=", "cache", ".", "read", "(", ")", "self", ".", "index", "=", "json", ".", "loads", "(", "json_index", "...
Reads the index from the cache file sets self.index
[ "Reads", "the", "index", "from", "the", "cache", "file", "sets", "self", ".", "index" ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L1174-L1179
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.write_to_cache
def write_to_cache(self, data, filename): ''' Writes data in JSON format to a file ''' json_data = json.dumps(data, sort_keys=True, indent=2) cache = open(filename, 'w') cache.write(json_data) cache.close()
python
def write_to_cache(self, data, filename): ''' Writes data in JSON format to a file ''' json_data = json.dumps(data, sort_keys=True, indent=2) cache = open(filename, 'w') cache.write(json_data) cache.close()
[ "def", "write_to_cache", "(", "self", ",", "data", ",", "filename", ")", ":", "json_data", "=", "json", ".", "dumps", "(", "data", ",", "sort_keys", "=", "True", ",", "indent", "=", "2", ")", "cache", "=", "open", "(", "filename", ",", "'w'", ")", ...
Writes data in JSON format to a file
[ "Writes", "data", "in", "JSON", "format", "to", "a", "file" ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L1181-L1187
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
Ec2Inventory.to_safe
def to_safe(self, word): ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' regex = "[^A-Za-z0-9\_" if not self.replace_dash_in_groups: regex += "\-" return re.sub(regex + "]", "_", word)
python
def to_safe(self, word): ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' regex = "[^A-Za-z0-9\_" if not self.replace_dash_in_groups: regex += "\-" return re.sub(regex + "]", "_", word)
[ "def", "to_safe", "(", "self", ",", "word", ")", ":", "regex", "=", "\"[^A-Za-z0-9\\_\"", "if", "not", "self", ".", "replace_dash_in_groups", ":", "regex", "+=", "\"\\-\"", "return", "re", ".", "sub", "(", "regex", "+", "\"]\"", ",", "\"_\"", ",", "word"...
Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
[ "Converts", "bad", "characters", "in", "a", "string", "to", "underscores", "so", "they", "can", "be", "used", "as", "Ansible", "groups" ]
train
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L1193-L1198
patarapolw/randomsentence
randomsentence/sentence_maker.py
SentenceMaker.from_keyword_list
def from_keyword_list(self, keyword_list, strictness=2, timeout=3): """ Convert a list of keywords to sentence. The result is sometimes None :param list keyword_list: a list of string :param int | None strictness: None for highest strictness. 2 or 1 for a less strict POS matching :param float timeout: timeout of this function :return list of tuple: sentence generated >>> SentenceMaker().from_keyword_list(['Love', 'blind', 'trouble']) [('For', False), ('love', True), ('to', False), ('such', False), ('blind', True), ('we', False), ('must', False), ('turn', False), ('to', False), ('the', False), ('trouble', True)] """ keyword_tags = nltk.pos_tag(keyword_list) start = time() while time() - start < timeout: index = 0 output_list = [] tagged_sent = self.random_sentences.get_tagged_sent() for word, tag in tagged_sent: if index >= len(keyword_tags): return self.get_overlap(keyword_list, output_list, is_word_list=True) if self.match_pos(tag, keyword_tags[index][1], strictness=strictness): output_list.append(keyword_tags[index][0]) index += 1 else: output_list.append(word) return []
python
def from_keyword_list(self, keyword_list, strictness=2, timeout=3): """ Convert a list of keywords to sentence. The result is sometimes None :param list keyword_list: a list of string :param int | None strictness: None for highest strictness. 2 or 1 for a less strict POS matching :param float timeout: timeout of this function :return list of tuple: sentence generated >>> SentenceMaker().from_keyword_list(['Love', 'blind', 'trouble']) [('For', False), ('love', True), ('to', False), ('such', False), ('blind', True), ('we', False), ('must', False), ('turn', False), ('to', False), ('the', False), ('trouble', True)] """ keyword_tags = nltk.pos_tag(keyword_list) start = time() while time() - start < timeout: index = 0 output_list = [] tagged_sent = self.random_sentences.get_tagged_sent() for word, tag in tagged_sent: if index >= len(keyword_tags): return self.get_overlap(keyword_list, output_list, is_word_list=True) if self.match_pos(tag, keyword_tags[index][1], strictness=strictness): output_list.append(keyword_tags[index][0]) index += 1 else: output_list.append(word) return []
[ "def", "from_keyword_list", "(", "self", ",", "keyword_list", ",", "strictness", "=", "2", ",", "timeout", "=", "3", ")", ":", "keyword_tags", "=", "nltk", ".", "pos_tag", "(", "keyword_list", ")", "start", "=", "time", "(", ")", "while", "time", "(", ...
Convert a list of keywords to sentence. The result is sometimes None :param list keyword_list: a list of string :param int | None strictness: None for highest strictness. 2 or 1 for a less strict POS matching :param float timeout: timeout of this function :return list of tuple: sentence generated >>> SentenceMaker().from_keyword_list(['Love', 'blind', 'trouble']) [('For', False), ('love', True), ('to', False), ('such', False), ('blind', True), ('we', False), ('must', False), ('turn', False), ('to', False), ('the', False), ('trouble', True)]
[ "Convert", "a", "list", "of", "keywords", "to", "sentence", ".", "The", "result", "is", "sometimes", "None" ]
train
https://github.com/patarapolw/randomsentence/blob/be4300e2e3fe4ab299268b05b0fe64de4f0aeb82/randomsentence/sentence_maker.py#L17-L46
gmr/tredis
tredis/scripting.py
ScriptingMixin.eval
def eval(self, script, keys=None, args=None): """:meth:`~tredis.RedisClient.eval` and :meth:`~tredis.RedisClient.evalsha` are used to evaluate scripts using the Lua interpreter built into Redis starting from version 2.6.0. The first argument of EVAL is a Lua 5.1 script. The script does not need to define a Lua function (and should not). It is just a Lua program that will run in the context of the Redis server. .. note:: **Time complexity**: Depends on the script that is executed. :param str script: The Lua script to execute :param list keys: A list of keys to pass into the script :param list args: A list of args to pass into the script :return: mixed """ if not keys: keys = [] if not args: args = [] return self._execute([b'EVAL', script, str(len(keys))] + keys + args)
python
def eval(self, script, keys=None, args=None): """:meth:`~tredis.RedisClient.eval` and :meth:`~tredis.RedisClient.evalsha` are used to evaluate scripts using the Lua interpreter built into Redis starting from version 2.6.0. The first argument of EVAL is a Lua 5.1 script. The script does not need to define a Lua function (and should not). It is just a Lua program that will run in the context of the Redis server. .. note:: **Time complexity**: Depends on the script that is executed. :param str script: The Lua script to execute :param list keys: A list of keys to pass into the script :param list args: A list of args to pass into the script :return: mixed """ if not keys: keys = [] if not args: args = [] return self._execute([b'EVAL', script, str(len(keys))] + keys + args)
[ "def", "eval", "(", "self", ",", "script", ",", "keys", "=", "None", ",", "args", "=", "None", ")", ":", "if", "not", "keys", ":", "keys", "=", "[", "]", "if", "not", "args", ":", "args", "=", "[", "]", "return", "self", ".", "_execute", "(", ...
:meth:`~tredis.RedisClient.eval` and :meth:`~tredis.RedisClient.evalsha` are used to evaluate scripts using the Lua interpreter built into Redis starting from version 2.6.0. The first argument of EVAL is a Lua 5.1 script. The script does not need to define a Lua function (and should not). It is just a Lua program that will run in the context of the Redis server. .. note:: **Time complexity**: Depends on the script that is executed. :param str script: The Lua script to execute :param list keys: A list of keys to pass into the script :param list args: A list of args to pass into the script :return: mixed
[ ":", "meth", ":", "~tredis", ".", "RedisClient", ".", "eval", "and", ":", "meth", ":", "~tredis", ".", "RedisClient", ".", "evalsha", "are", "used", "to", "evaluate", "scripts", "using", "the", "Lua", "interpreter", "built", "into", "Redis", "starting", "f...
train
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/scripting.py#L7-L30
gmr/tredis
tredis/scripting.py
ScriptingMixin.evalsha
def evalsha(self, sha1, keys=None, args=None): """Evaluates a script cached on the server side by its SHA1 digest. Scripts are cached on the server side using the :meth:`~tredis.RedisClient.script_load` command. The command is otherwise identical to :meth:`~tredis.RedisClient.eval`. .. note:: **Time complexity**: Depends on the script that is executed. :param str sha1: The sha1 hash of the script to execute :param list keys: A list of keys to pass into the script :param list args: A list of args to pass into the script :return: mixed """ if not keys: keys = [] if not args: args = [] return self._execute([b'EVALSHA', sha1, str(len(keys))] + keys + args)
python
def evalsha(self, sha1, keys=None, args=None): """Evaluates a script cached on the server side by its SHA1 digest. Scripts are cached on the server side using the :meth:`~tredis.RedisClient.script_load` command. The command is otherwise identical to :meth:`~tredis.RedisClient.eval`. .. note:: **Time complexity**: Depends on the script that is executed. :param str sha1: The sha1 hash of the script to execute :param list keys: A list of keys to pass into the script :param list args: A list of args to pass into the script :return: mixed """ if not keys: keys = [] if not args: args = [] return self._execute([b'EVALSHA', sha1, str(len(keys))] + keys + args)
[ "def", "evalsha", "(", "self", ",", "sha1", ",", "keys", "=", "None", ",", "args", "=", "None", ")", ":", "if", "not", "keys", ":", "keys", "=", "[", "]", "if", "not", "args", ":", "args", "=", "[", "]", "return", "self", ".", "_execute", "(", ...
Evaluates a script cached on the server side by its SHA1 digest. Scripts are cached on the server side using the :meth:`~tredis.RedisClient.script_load` command. The command is otherwise identical to :meth:`~tredis.RedisClient.eval`. .. note:: **Time complexity**: Depends on the script that is executed. :param str sha1: The sha1 hash of the script to execute :param list keys: A list of keys to pass into the script :param list args: A list of args to pass into the script :return: mixed
[ "Evaluates", "a", "script", "cached", "on", "the", "server", "side", "by", "its", "SHA1", "digest", ".", "Scripts", "are", "cached", "on", "the", "server", "side", "using", "the", ":", "meth", ":", "~tredis", ".", "RedisClient", ".", "script_load", "comman...
train
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/scripting.py#L32-L52
aganezov/bg
bg/kbreak.py
KBreak.valid_kbreak_matchings
def valid_kbreak_matchings(start_edges, result_edges): """ A staticmethod check implementation that makes sure that degrees of vertices, that are affected by current :class:`KBreak` By the notion of k-break, it shall keep the degree of vertices in :class:`bg.breakpoint_graph.BreakpointGraph` the same, after its application. By utilizing the Counter class, such check is performed, as the number the vertex is mentioned corresponds to its degree. :param start_edges: a list of pairs of vertices, that specifies where edges shall be removed by :class:`KBreak` :type start_edges: ``list(tuple(vertex, vertex), ...)`` :param result_edges: a list of pairs of vertices, that specifies where edges shall be created by :class:`KBreak` :type result_edges: ``list(tuple(vertex, vertex), ...)`` :return: a flag indicating if the degree of vertices are equal in start / result edges, targeted by :class:`KBreak` :rtype: ``Boolean`` """ start_stats = Counter(vertex for vertex_pair in start_edges for vertex in vertex_pair) result_stats = Counter(vertex for vertex_pair in result_edges for vertex in vertex_pair) return start_stats == result_stats
python
def valid_kbreak_matchings(start_edges, result_edges): """ A staticmethod check implementation that makes sure that degrees of vertices, that are affected by current :class:`KBreak` By the notion of k-break, it shall keep the degree of vertices in :class:`bg.breakpoint_graph.BreakpointGraph` the same, after its application. By utilizing the Counter class, such check is performed, as the number the vertex is mentioned corresponds to its degree. :param start_edges: a list of pairs of vertices, that specifies where edges shall be removed by :class:`KBreak` :type start_edges: ``list(tuple(vertex, vertex), ...)`` :param result_edges: a list of pairs of vertices, that specifies where edges shall be created by :class:`KBreak` :type result_edges: ``list(tuple(vertex, vertex), ...)`` :return: a flag indicating if the degree of vertices are equal in start / result edges, targeted by :class:`KBreak` :rtype: ``Boolean`` """ start_stats = Counter(vertex for vertex_pair in start_edges for vertex in vertex_pair) result_stats = Counter(vertex for vertex_pair in result_edges for vertex in vertex_pair) return start_stats == result_stats
[ "def", "valid_kbreak_matchings", "(", "start_edges", ",", "result_edges", ")", ":", "start_stats", "=", "Counter", "(", "vertex", "for", "vertex_pair", "in", "start_edges", "for", "vertex", "in", "vertex_pair", ")", "result_stats", "=", "Counter", "(", "vertex", ...
A staticmethod check implementation that makes sure that degrees of vertices, that are affected by current :class:`KBreak` By the notion of k-break, it shall keep the degree of vertices in :class:`bg.breakpoint_graph.BreakpointGraph` the same, after its application. By utilizing the Counter class, such check is performed, as the number the vertex is mentioned corresponds to its degree. :param start_edges: a list of pairs of vertices, that specifies where edges shall be removed by :class:`KBreak` :type start_edges: ``list(tuple(vertex, vertex), ...)`` :param result_edges: a list of pairs of vertices, that specifies where edges shall be created by :class:`KBreak` :type result_edges: ``list(tuple(vertex, vertex), ...)`` :return: a flag indicating if the degree of vertices are equal in start / result edges, targeted by :class:`KBreak` :rtype: ``Boolean``
[ "A", "staticmethod", "check", "implementation", "that", "makes", "sure", "that", "degrees", "of", "vertices", "that", "are", "affected", "by", "current", ":", "class", ":", "KBreak" ]
train
https://github.com/aganezov/bg/blob/1ec758193441e49e7b34e0da09571480f4c24455/bg/kbreak.py#L83-L98
radzak/rtv-downloader
rtv/downloaders/common.py
VideoDownloader.render_path
def render_path(self) -> str: """Render path by filling the path template with video information.""" # TODO: Fix defaults when date is not found (empty string or None) # https://stackoverflow.com/questions/23407295/default-kwarg-values-for-pythons-str-format-method from string import Formatter class UnseenFormatter(Formatter): def get_value(self, key, args, kwds): if isinstance(key, str): try: return kwds[key] except KeyError: return key else: return super().get_value(key, args, kwds) data = self.video.data site_name = data['site'] try: template = self.templates[site_name] except KeyError: raise NoTemplateFoundError fmt = UnseenFormatter() filename_raw = fmt.format(template, **data) filename = clean_filename(filename_raw) path = os.path.join(self.download_dir, filename) return path
python
def render_path(self) -> str: """Render path by filling the path template with video information.""" # TODO: Fix defaults when date is not found (empty string or None) # https://stackoverflow.com/questions/23407295/default-kwarg-values-for-pythons-str-format-method from string import Formatter class UnseenFormatter(Formatter): def get_value(self, key, args, kwds): if isinstance(key, str): try: return kwds[key] except KeyError: return key else: return super().get_value(key, args, kwds) data = self.video.data site_name = data['site'] try: template = self.templates[site_name] except KeyError: raise NoTemplateFoundError fmt = UnseenFormatter() filename_raw = fmt.format(template, **data) filename = clean_filename(filename_raw) path = os.path.join(self.download_dir, filename) return path
[ "def", "render_path", "(", "self", ")", "->", "str", ":", "# TODO: Fix defaults when date is not found (empty string or None)", "# https://stackoverflow.com/questions/23407295/default-kwarg-values-for-pythons-str-format-method", "from", "string", "import", "Formatter", "class", "Unseen...
Render path by filling the path template with video information.
[ "Render", "path", "by", "filling", "the", "path", "template", "with", "video", "information", "." ]
train
https://github.com/radzak/rtv-downloader/blob/b9114b7f4c35fabe6ec9ad1764a65858667a866e/rtv/downloaders/common.py#L37-L66
ianclegg/ntlmlib
ntlmlib/authentication.py
PasswordAuthentication.get_lm_response
def get_lm_response(self, flags, challenge): """ Computes the 24 byte LMHash password hash given the 8 byte server challenge. :param challenge: The 8-byte challenge message generated by the server :return: The 24 byte LMHash """ # If lm compatibility level lower than 3, but the server negotiated NTLM2, generate an # NTLM2 response in preference to the weaker LMv1 if flags & NegotiateFlag.NTLMSSP_NTLM2_KEY and self._lm_compatibility < 3: response, key = self._client_challenge + b'\0' * 16, None elif 0 <= self._lm_compatibility <= 1: response, key = PasswordAuthentication._get_lm_response(self._password, challenge) elif self._lm_compatibility == 2: response, key = PasswordAuthentication.get_ntlmv1_response(self._password, challenge) else: response, key = PasswordAuthentication.get_lmv2_response(self._domain, self._username, self._password, challenge, self._client_challenge) return response, key
python
def get_lm_response(self, flags, challenge): """ Computes the 24 byte LMHash password hash given the 8 byte server challenge. :param challenge: The 8-byte challenge message generated by the server :return: The 24 byte LMHash """ # If lm compatibility level lower than 3, but the server negotiated NTLM2, generate an # NTLM2 response in preference to the weaker LMv1 if flags & NegotiateFlag.NTLMSSP_NTLM2_KEY and self._lm_compatibility < 3: response, key = self._client_challenge + b'\0' * 16, None elif 0 <= self._lm_compatibility <= 1: response, key = PasswordAuthentication._get_lm_response(self._password, challenge) elif self._lm_compatibility == 2: response, key = PasswordAuthentication.get_ntlmv1_response(self._password, challenge) else: response, key = PasswordAuthentication.get_lmv2_response(self._domain, self._username, self._password, challenge, self._client_challenge) return response, key
[ "def", "get_lm_response", "(", "self", ",", "flags", ",", "challenge", ")", ":", "# If lm compatibility level lower than 3, but the server negotiated NTLM2, generate an", "# NTLM2 response in preference to the weaker LMv1", "if", "flags", "&", "NegotiateFlag", ".", "NTLMSSP_NTLM2_K...
Computes the 24 byte LMHash password hash given the 8 byte server challenge. :param challenge: The 8-byte challenge message generated by the server :return: The 24 byte LMHash
[ "Computes", "the", "24", "byte", "LMHash", "password", "hash", "given", "the", "8", "byte", "server", "challenge", ".", ":", "param", "challenge", ":", "The", "8", "-", "byte", "challenge", "message", "generated", "by", "the", "server", ":", "return", ":",...
train
https://github.com/ianclegg/ntlmlib/blob/49eadfe4701bcce84a4ca9cbab5b6d5d72eaad05/ntlmlib/authentication.py#L127-L145
ianclegg/ntlmlib
ntlmlib/authentication.py
PasswordAuthentication.get_ntlm_response
def get_ntlm_response(self, flags, challenge, target_info=None, channel_binding=None): """ Computes the 24 byte NTLM challenge response given the 8 byte server challenge, along with the session key. If NTLMv2 is used, the TargetInfo structure must be supplied, the updated TargetInfo structure will be returned :param challenge: The 8-byte challenge message generated by the server :return: A tuple containing the 24 byte NTLM Hash, Session Key and TargetInfo """ # TODO: IMPLEMENT THE FOLLOWING FEATURES # If NTLM v2 authentication is used and the CHALLENGE_MESSAGE does not contain both MsvAvNbComputerName and # MsvAvNbDomainName AVPairs and either Integrity is TRUE or Confidentiality is TRUE, then return STATUS_LOGON_FAILURE. # If NTLM v2 authentication is used and the CHALLENGE_MESSAGE contains a TargetInfo field, the client SHOULD NOT send # the LmChallengeResponse and SHOULD set the LmChallengeResponseLen and LmChallengeResponseMaxLen fields in the # AUTHENTICATE_MESSAGE to zero. # If lm compatibility level is 3 or lower, but the server negotiated NTLM2, generate an # NTLM2 response in preference to the weaker NTLMv1. if flags & NegotiateFlag.NTLMSSP_NTLM2_KEY and self._lm_compatibility < 3: response, key = PasswordAuthentication.get_ntlm2_response(self._password, challenge, self._client_challenge) elif 0 <= self._lm_compatibility < 3: response, key = PasswordAuthentication.get_ntlmv1_response(self._password, challenge) else: # We should use the timestamp included in TargetInfo, if no timestamp is set we generate one and add it to # the outgoing TargetInfo. If the timestamp is set, we should also set the MIC flag if target_info is None: target_info = TargetInfo() if target_info[TargetInfo.NTLMSSP_AV_TIME] is None: timestamp = PasswordAuthentication._get_ntlm_timestamp() else: # TODO: If the CHALLENGE_MESSAGE TargetInfo field (section 2.2.1.2) has an MsvAvTimestamp present, # TODO: the client SHOULD provide a MIC. 
timestamp = target_info[TargetInfo.NTLMSSP_AV_TIME][1] #target_info[TargetInfo.NTLMSSP_AV_FLAGS] = struct.pack('<I', 2) # Calculating channel bindings is poorly documented. It is implemented in winrmlib, and needs to be # moved here # if self._av_channel_bindings is True and channel_binding is not None: # target_info[TargetInfo.NTLMSSP_AV_CHANNEL_BINDINGS] = channel_binding response, key, target_info = PasswordAuthentication.get_ntlmv2_response( self._domain, self._username, self._password.encode('utf-16le'), challenge, self._client_challenge, timestamp, target_info) return response, key, target_info
python
def get_ntlm_response(self, flags, challenge, target_info=None, channel_binding=None): """ Computes the 24 byte NTLM challenge response given the 8 byte server challenge, along with the session key. If NTLMv2 is used, the TargetInfo structure must be supplied, the updated TargetInfo structure will be returned :param challenge: The 8-byte challenge message generated by the server :return: A tuple containing the 24 byte NTLM Hash, Session Key and TargetInfo """ # TODO: IMPLEMENT THE FOLLOWING FEATURES # If NTLM v2 authentication is used and the CHALLENGE_MESSAGE does not contain both MsvAvNbComputerName and # MsvAvNbDomainName AVPairs and either Integrity is TRUE or Confidentiality is TRUE, then return STATUS_LOGON_FAILURE. # If NTLM v2 authentication is used and the CHALLENGE_MESSAGE contains a TargetInfo field, the client SHOULD NOT send # the LmChallengeResponse and SHOULD set the LmChallengeResponseLen and LmChallengeResponseMaxLen fields in the # AUTHENTICATE_MESSAGE to zero. # If lm compatibility level is 3 or lower, but the server negotiated NTLM2, generate an # NTLM2 response in preference to the weaker NTLMv1. if flags & NegotiateFlag.NTLMSSP_NTLM2_KEY and self._lm_compatibility < 3: response, key = PasswordAuthentication.get_ntlm2_response(self._password, challenge, self._client_challenge) elif 0 <= self._lm_compatibility < 3: response, key = PasswordAuthentication.get_ntlmv1_response(self._password, challenge) else: # We should use the timestamp included in TargetInfo, if no timestamp is set we generate one and add it to # the outgoing TargetInfo. If the timestamp is set, we should also set the MIC flag if target_info is None: target_info = TargetInfo() if target_info[TargetInfo.NTLMSSP_AV_TIME] is None: timestamp = PasswordAuthentication._get_ntlm_timestamp() else: # TODO: If the CHALLENGE_MESSAGE TargetInfo field (section 2.2.1.2) has an MsvAvTimestamp present, # TODO: the client SHOULD provide a MIC. 
timestamp = target_info[TargetInfo.NTLMSSP_AV_TIME][1] #target_info[TargetInfo.NTLMSSP_AV_FLAGS] = struct.pack('<I', 2) # Calculating channel bindings is poorly documented. It is implemented in winrmlib, and needs to be # moved here # if self._av_channel_bindings is True and channel_binding is not None: # target_info[TargetInfo.NTLMSSP_AV_CHANNEL_BINDINGS] = channel_binding response, key, target_info = PasswordAuthentication.get_ntlmv2_response( self._domain, self._username, self._password.encode('utf-16le'), challenge, self._client_challenge, timestamp, target_info) return response, key, target_info
[ "def", "get_ntlm_response", "(", "self", ",", "flags", ",", "challenge", ",", "target_info", "=", "None", ",", "channel_binding", "=", "None", ")", ":", "# TODO: IMPLEMENT THE FOLLOWING FEATURES", "# If NTLM v2 authentication is used and the CHALLENGE_MESSAGE does not contain b...
Computes the 24 byte NTLM challenge response given the 8 byte server challenge, along with the session key. If NTLMv2 is used, the TargetInfo structure must be supplied, the updated TargetInfo structure will be returned :param challenge: The 8-byte challenge message generated by the server :return: A tuple containing the 24 byte NTLM Hash, Session Key and TargetInfo
[ "Computes", "the", "24", "byte", "NTLM", "challenge", "response", "given", "the", "8", "byte", "server", "challenge", "along", "with", "the", "session", "key", ".", "If", "NTLMv2", "is", "used", "the", "TargetInfo", "structure", "must", "be", "supplied", "th...
train
https://github.com/ianclegg/ntlmlib/blob/49eadfe4701bcce84a4ca9cbab5b6d5d72eaad05/ntlmlib/authentication.py#L147-L191
ianclegg/ntlmlib
ntlmlib/authentication.py
PasswordAuthentication._expand_des_key
def _expand_des_key(key): """ Expand the key from a 7-byte password key into a 8-byte DES key """ key = key[:7] + b'\0' * (7 - len(key)) byte = struct.unpack_from('BBBBBBB', key) s = struct.pack('B', ((byte[0] >> 1) & 0x7f) << 1) s += struct.pack("B", ((byte[0] & 0x01) << 6 | ((byte[1] >> 2) & 0x3f)) << 1) s += struct.pack("B", ((byte[1] & 0x03) << 5 | ((byte[2] >> 3) & 0x1f)) << 1) s += struct.pack("B", ((byte[2] & 0x07) << 4 | ((byte[3] >> 4) & 0x0f)) << 1) s += struct.pack("B", ((byte[3] & 0x0f) << 3 | ((byte[4] >> 5) & 0x07)) << 1) s += struct.pack("B", ((byte[4] & 0x1f) << 2 | ((byte[5] >> 6) & 0x03)) << 1) s += struct.pack("B", ((byte[5] & 0x3f) << 1 | ((byte[6] >> 7) & 0x01)) << 1) s += struct.pack("B", (byte[6] & 0x7f) << 1) return s
python
def _expand_des_key(key): """ Expand the key from a 7-byte password key into a 8-byte DES key """ key = key[:7] + b'\0' * (7 - len(key)) byte = struct.unpack_from('BBBBBBB', key) s = struct.pack('B', ((byte[0] >> 1) & 0x7f) << 1) s += struct.pack("B", ((byte[0] & 0x01) << 6 | ((byte[1] >> 2) & 0x3f)) << 1) s += struct.pack("B", ((byte[1] & 0x03) << 5 | ((byte[2] >> 3) & 0x1f)) << 1) s += struct.pack("B", ((byte[2] & 0x07) << 4 | ((byte[3] >> 4) & 0x0f)) << 1) s += struct.pack("B", ((byte[3] & 0x0f) << 3 | ((byte[4] >> 5) & 0x07)) << 1) s += struct.pack("B", ((byte[4] & 0x1f) << 2 | ((byte[5] >> 6) & 0x03)) << 1) s += struct.pack("B", ((byte[5] & 0x3f) << 1 | ((byte[6] >> 7) & 0x01)) << 1) s += struct.pack("B", (byte[6] & 0x7f) << 1) return s
[ "def", "_expand_des_key", "(", "key", ")", ":", "key", "=", "key", "[", ":", "7", "]", "+", "b'\\0'", "*", "(", "7", "-", "len", "(", "key", ")", ")", "byte", "=", "struct", ".", "unpack_from", "(", "'BBBBBBB'", ",", "key", ")", "s", "=", "stru...
Expand the key from a 7-byte password key into a 8-byte DES key
[ "Expand", "the", "key", "from", "a", "7", "-", "byte", "password", "key", "into", "a", "8", "-", "byte", "DES", "key" ]
train
https://github.com/ianclegg/ntlmlib/blob/49eadfe4701bcce84a4ca9cbab5b6d5d72eaad05/ntlmlib/authentication.py#L194-L208
ianclegg/ntlmlib
ntlmlib/authentication.py
PasswordAuthentication.get_ntlmv1_response
def get_ntlmv1_response(password, challenge): """ Generate the Unicode MD4 hash for the password associated with these credentials. """ ntlm_hash = PasswordAuthentication.ntowfv1(password.encode('utf-16le')) response = PasswordAuthentication._encrypt_des_block(ntlm_hash[:7], challenge) response += PasswordAuthentication._encrypt_des_block(ntlm_hash[7:14], challenge) response += PasswordAuthentication._encrypt_des_block(ntlm_hash[14:], challenge) # The NTLMv1 session key is simply the MD4 hash of the ntlm hash session_hash = hashlib.new('md4') session_hash.update(ntlm_hash) return response, session_hash.digest()
python
def get_ntlmv1_response(password, challenge): """ Generate the Unicode MD4 hash for the password associated with these credentials. """ ntlm_hash = PasswordAuthentication.ntowfv1(password.encode('utf-16le')) response = PasswordAuthentication._encrypt_des_block(ntlm_hash[:7], challenge) response += PasswordAuthentication._encrypt_des_block(ntlm_hash[7:14], challenge) response += PasswordAuthentication._encrypt_des_block(ntlm_hash[14:], challenge) # The NTLMv1 session key is simply the MD4 hash of the ntlm hash session_hash = hashlib.new('md4') session_hash.update(ntlm_hash) return response, session_hash.digest()
[ "def", "get_ntlmv1_response", "(", "password", ",", "challenge", ")", ":", "ntlm_hash", "=", "PasswordAuthentication", ".", "ntowfv1", "(", "password", ".", "encode", "(", "'utf-16le'", ")", ")", "response", "=", "PasswordAuthentication", ".", "_encrypt_des_block", ...
Generate the Unicode MD4 hash for the password associated with these credentials.
[ "Generate", "the", "Unicode", "MD4", "hash", "for", "the", "password", "associated", "with", "these", "credentials", "." ]
train
https://github.com/ianclegg/ntlmlib/blob/49eadfe4701bcce84a4ca9cbab5b6d5d72eaad05/ntlmlib/authentication.py#L237-L249
ianclegg/ntlmlib
ntlmlib/authentication.py
PasswordAuthentication.get_ntlm2_response
def get_ntlm2_response(password, server_challenge, client_challenge): """ Generate the Unicode MD4 hash for the password associated with these credentials. """ md5 = hashlib.new('md5') md5.update(server_challenge + client_challenge) ntlm2_session_hash = md5.digest()[:8] ntlm_hash = PasswordAuthentication.ntowfv1(password.encode('utf-16le')) response = PasswordAuthentication._encrypt_des_block(ntlm_hash[:7], ntlm2_session_hash) response += PasswordAuthentication._encrypt_des_block(ntlm_hash[7:14], ntlm2_session_hash) response += PasswordAuthentication._encrypt_des_block(ntlm_hash[14:], ntlm2_session_hash) session_hash = hashlib.new('md4') session_hash.update(ntlm_hash) hmac_context = hmac.HMAC(session_hash.digest(), hashes.MD5(), backend=default_backend()) hmac_context.update(server_challenge + client_challenge) return response, hmac_context.finalize()
python
def get_ntlm2_response(password, server_challenge, client_challenge): """ Generate the Unicode MD4 hash for the password associated with these credentials. """ md5 = hashlib.new('md5') md5.update(server_challenge + client_challenge) ntlm2_session_hash = md5.digest()[:8] ntlm_hash = PasswordAuthentication.ntowfv1(password.encode('utf-16le')) response = PasswordAuthentication._encrypt_des_block(ntlm_hash[:7], ntlm2_session_hash) response += PasswordAuthentication._encrypt_des_block(ntlm_hash[7:14], ntlm2_session_hash) response += PasswordAuthentication._encrypt_des_block(ntlm_hash[14:], ntlm2_session_hash) session_hash = hashlib.new('md4') session_hash.update(ntlm_hash) hmac_context = hmac.HMAC(session_hash.digest(), hashes.MD5(), backend=default_backend()) hmac_context.update(server_challenge + client_challenge) return response, hmac_context.finalize()
[ "def", "get_ntlm2_response", "(", "password", ",", "server_challenge", ",", "client_challenge", ")", ":", "md5", "=", "hashlib", ".", "new", "(", "'md5'", ")", "md5", ".", "update", "(", "server_challenge", "+", "client_challenge", ")", "ntlm2_session_hash", "="...
Generate the Unicode MD4 hash for the password associated with these credentials.
[ "Generate", "the", "Unicode", "MD4", "hash", "for", "the", "password", "associated", "with", "these", "credentials", "." ]
train
https://github.com/ianclegg/ntlmlib/blob/49eadfe4701bcce84a4ca9cbab5b6d5d72eaad05/ntlmlib/authentication.py#L252-L268
ianclegg/ntlmlib
ntlmlib/authentication.py
PasswordAuthentication.ntowfv2
def ntowfv2(domain, user, password): """ NTOWFv2() Implementation [MS-NLMP] v20140502 NT LAN Manager (NTLM) Authentication Protocol 3.3.2 NTLM v2 Authentication :param domain: The windows domain name :param user: The windows username :param password: The users password :return: Hash Data """ md4 = hashlib.new('md4') md4.update(password) hmac_context = hmac.HMAC(md4.digest(), hashes.MD5(), backend=default_backend()) hmac_context.update(user.upper().encode('utf-16le')) hmac_context.update(domain.encode('utf-16le')) return hmac_context.finalize()
python
def ntowfv2(domain, user, password): """ NTOWFv2() Implementation [MS-NLMP] v20140502 NT LAN Manager (NTLM) Authentication Protocol 3.3.2 NTLM v2 Authentication :param domain: The windows domain name :param user: The windows username :param password: The users password :return: Hash Data """ md4 = hashlib.new('md4') md4.update(password) hmac_context = hmac.HMAC(md4.digest(), hashes.MD5(), backend=default_backend()) hmac_context.update(user.upper().encode('utf-16le')) hmac_context.update(domain.encode('utf-16le')) return hmac_context.finalize()
[ "def", "ntowfv2", "(", "domain", ",", "user", ",", "password", ")", ":", "md4", "=", "hashlib", ".", "new", "(", "'md4'", ")", "md4", ".", "update", "(", "password", ")", "hmac_context", "=", "hmac", ".", "HMAC", "(", "md4", ".", "digest", "(", ")"...
NTOWFv2() Implementation [MS-NLMP] v20140502 NT LAN Manager (NTLM) Authentication Protocol 3.3.2 NTLM v2 Authentication :param domain: The windows domain name :param user: The windows username :param password: The users password :return: Hash Data
[ "NTOWFv2", "()", "Implementation", "[", "MS", "-", "NLMP", "]", "v20140502", "NT", "LAN", "Manager", "(", "NTLM", ")", "Authentication", "Protocol", "3", ".", "3", ".", "2", "NTLM", "v2", "Authentication", ":", "param", "domain", ":", "The", "windows", "...
train
https://github.com/ianclegg/ntlmlib/blob/49eadfe4701bcce84a4ca9cbab5b6d5d72eaad05/ntlmlib/authentication.py#L285-L300
ianclegg/ntlmlib
ntlmlib/authentication.py
PasswordAuthentication._compute_response
def _compute_response(response_key, server_challenge, client_challenge): """ ComputeResponse() has been refactored slightly to reduce its complexity and improve readability, the 'if' clause which switches between LMv2 and NTLMv2 computation has been removed. Users should not call this method directly, they should rely on get_lmv2_response and get_ntlmv2_response depending on the negotiated flags. [MS-NLMP] v20140502 NT LAN Manager (NTLM) Authentication Protocol 3.3.2 NTLM v2 Authentication """ hmac_context = hmac.HMAC(response_key, hashes.MD5(), backend=default_backend()) hmac_context.update(server_challenge) hmac_context.update(client_challenge) return hmac_context.finalize()
python
def _compute_response(response_key, server_challenge, client_challenge): """ ComputeResponse() has been refactored slightly to reduce its complexity and improve readability, the 'if' clause which switches between LMv2 and NTLMv2 computation has been removed. Users should not call this method directly, they should rely on get_lmv2_response and get_ntlmv2_response depending on the negotiated flags. [MS-NLMP] v20140502 NT LAN Manager (NTLM) Authentication Protocol 3.3.2 NTLM v2 Authentication """ hmac_context = hmac.HMAC(response_key, hashes.MD5(), backend=default_backend()) hmac_context.update(server_challenge) hmac_context.update(client_challenge) return hmac_context.finalize()
[ "def", "_compute_response", "(", "response_key", ",", "server_challenge", ",", "client_challenge", ")", ":", "hmac_context", "=", "hmac", ".", "HMAC", "(", "response_key", ",", "hashes", ".", "MD5", "(", ")", ",", "backend", "=", "default_backend", "(", ")", ...
ComputeResponse() has been refactored slightly to reduce its complexity and improve readability, the 'if' clause which switches between LMv2 and NTLMv2 computation has been removed. Users should not call this method directly, they should rely on get_lmv2_response and get_ntlmv2_response depending on the negotiated flags. [MS-NLMP] v20140502 NT LAN Manager (NTLM) Authentication Protocol 3.3.2 NTLM v2 Authentication
[ "ComputeResponse", "()", "has", "been", "refactored", "slightly", "to", "reduce", "its", "complexity", "and", "improve", "readability", "the", "if", "clause", "which", "switches", "between", "LMv2", "and", "NTLMv2", "computation", "has", "been", "removed", ".", ...
train
https://github.com/ianclegg/ntlmlib/blob/49eadfe4701bcce84a4ca9cbab5b6d5d72eaad05/ntlmlib/authentication.py#L303-L316
ianclegg/ntlmlib
ntlmlib/authentication.py
PasswordAuthentication.get_lmv2_response
def get_lmv2_response(domain, username, password, server_challenge, client_challenge): """ Computes an appropriate LMv2 response based on the supplied arguments The algorithm is based on jCIFS. The response is 24 bytes, with the 16 bytes of hash concatenated with the 8 byte client client_challenge """ ntlmv2_hash = PasswordAuthentication.ntowfv2(domain, username, password.encode('utf-16le')) hmac_context = hmac.HMAC(ntlmv2_hash, hashes.MD5(), backend=default_backend()) hmac_context.update(server_challenge) hmac_context.update(client_challenge) lmv2_hash = hmac_context.finalize() # The LMv2 master user session key is a HMAC MD5 of the NTLMv2 and LMv2 hash session_key = hmac.HMAC(ntlmv2_hash, hashes.MD5(), backend=default_backend()) session_key.update(lmv2_hash) return lmv2_hash + client_challenge, session_key.finalize()
python
def get_lmv2_response(domain, username, password, server_challenge, client_challenge): """ Computes an appropriate LMv2 response based on the supplied arguments The algorithm is based on jCIFS. The response is 24 bytes, with the 16 bytes of hash concatenated with the 8 byte client client_challenge """ ntlmv2_hash = PasswordAuthentication.ntowfv2(domain, username, password.encode('utf-16le')) hmac_context = hmac.HMAC(ntlmv2_hash, hashes.MD5(), backend=default_backend()) hmac_context.update(server_challenge) hmac_context.update(client_challenge) lmv2_hash = hmac_context.finalize() # The LMv2 master user session key is a HMAC MD5 of the NTLMv2 and LMv2 hash session_key = hmac.HMAC(ntlmv2_hash, hashes.MD5(), backend=default_backend()) session_key.update(lmv2_hash) return lmv2_hash + client_challenge, session_key.finalize()
[ "def", "get_lmv2_response", "(", "domain", ",", "username", ",", "password", ",", "server_challenge", ",", "client_challenge", ")", ":", "ntlmv2_hash", "=", "PasswordAuthentication", ".", "ntowfv2", "(", "domain", ",", "username", ",", "password", ".", "encode", ...
Computes an appropriate LMv2 response based on the supplied arguments The algorithm is based on jCIFS. The response is 24 bytes, with the 16 bytes of hash concatenated with the 8 byte client client_challenge
[ "Computes", "an", "appropriate", "LMv2", "response", "based", "on", "the", "supplied", "arguments", "The", "algorithm", "is", "based", "on", "jCIFS", ".", "The", "response", "is", "24", "bytes", "with", "the", "16", "bytes", "of", "hash", "concatenated", "wi...
train
https://github.com/ianclegg/ntlmlib/blob/49eadfe4701bcce84a4ca9cbab5b6d5d72eaad05/ntlmlib/authentication.py#L319-L335
ianclegg/ntlmlib
ntlmlib/authentication.py
PasswordAuthentication.get_ntlmv2_response
def get_ntlmv2_response(domain, user, password, server_challenge, client_challenge, timestamp, target_info): """ [MS-NLMP] v20140502 NT LAN Manager (NTLM) Authentication Protocol 3.3.2 NTLM v2 Authentication Computes an appropriate NTLMv2 response. The algorithm is based on jCIFS and the ComputeResponse() implementation the protocol documentation. Note: The MS ComputeResponse() implementation refers to a variable called ServerName, this is for historical reasons and is misleading. ServerName refers to the bytes that compose the AV_PAIRS structure called target_info. The reserved constants below are defined in the documentation :param response_key: The return value from NTOWF() :param server_challenge: The 8-byte challenge message generated by the server :param client_challenge: The 8-byte challenge message generated by the client :param timestamp: The 8-byte little-endian time in GMT :param target_info: The AttributeValuePairs structure to be returned to the server :return: NTLMv2 Response """ lo_response_version = b'\x01' hi_response_version = b'\x01' reserved_dword = b'\x00' * 4 reserved_bytes = b'\x00' * 6 response_key = PasswordAuthentication.ntowfv2(domain, user, password) proof_material = lo_response_version proof_material += hi_response_version proof_material += reserved_bytes proof_material += timestamp proof_material += client_challenge proof_material += reserved_dword proof_material += target_info.get_data() proof_material += reserved_dword proof = PasswordAuthentication._compute_response(response_key, server_challenge, proof_material) # The master session key derivation session_key = hmac.HMAC(response_key, hashes.MD5(), backend=default_backend()) session_key.update(proof) session_master_key = session_key.finalize() return proof + proof_material, session_master_key, target_info
python
def get_ntlmv2_response(domain, user, password, server_challenge, client_challenge, timestamp, target_info): """ [MS-NLMP] v20140502 NT LAN Manager (NTLM) Authentication Protocol 3.3.2 NTLM v2 Authentication Computes an appropriate NTLMv2 response. The algorithm is based on jCIFS and the ComputeResponse() implementation the protocol documentation. Note: The MS ComputeResponse() implementation refers to a variable called ServerName, this is for historical reasons and is misleading. ServerName refers to the bytes that compose the AV_PAIRS structure called target_info. The reserved constants below are defined in the documentation :param response_key: The return value from NTOWF() :param server_challenge: The 8-byte challenge message generated by the server :param client_challenge: The 8-byte challenge message generated by the client :param timestamp: The 8-byte little-endian time in GMT :param target_info: The AttributeValuePairs structure to be returned to the server :return: NTLMv2 Response """ lo_response_version = b'\x01' hi_response_version = b'\x01' reserved_dword = b'\x00' * 4 reserved_bytes = b'\x00' * 6 response_key = PasswordAuthentication.ntowfv2(domain, user, password) proof_material = lo_response_version proof_material += hi_response_version proof_material += reserved_bytes proof_material += timestamp proof_material += client_challenge proof_material += reserved_dword proof_material += target_info.get_data() proof_material += reserved_dword proof = PasswordAuthentication._compute_response(response_key, server_challenge, proof_material) # The master session key derivation session_key = hmac.HMAC(response_key, hashes.MD5(), backend=default_backend()) session_key.update(proof) session_master_key = session_key.finalize() return proof + proof_material, session_master_key, target_info
[ "def", "get_ntlmv2_response", "(", "domain", ",", "user", ",", "password", ",", "server_challenge", ",", "client_challenge", ",", "timestamp", ",", "target_info", ")", ":", "lo_response_version", "=", "b'\\x01'", "hi_response_version", "=", "b'\\x01'", "reserved_dword...
[MS-NLMP] v20140502 NT LAN Manager (NTLM) Authentication Protocol 3.3.2 NTLM v2 Authentication Computes an appropriate NTLMv2 response. The algorithm is based on jCIFS and the ComputeResponse() implementation the protocol documentation. Note: The MS ComputeResponse() implementation refers to a variable called ServerName, this is for historical reasons and is misleading. ServerName refers to the bytes that compose the AV_PAIRS structure called target_info. The reserved constants below are defined in the documentation :param response_key: The return value from NTOWF() :param server_challenge: The 8-byte challenge message generated by the server :param client_challenge: The 8-byte challenge message generated by the client :param timestamp: The 8-byte little-endian time in GMT :param target_info: The AttributeValuePairs structure to be returned to the server :return: NTLMv2 Response
[ "[", "MS", "-", "NLMP", "]", "v20140502", "NT", "LAN", "Manager", "(", "NTLM", ")", "Authentication", "Protocol", "3", ".", "3", ".", "2", "NTLM", "v2", "Authentication" ]
train
https://github.com/ianclegg/ntlmlib/blob/49eadfe4701bcce84a4ca9cbab5b6d5d72eaad05/ntlmlib/authentication.py#L338-L376
backbohne/docx-xslt
docxxslt/engines.py
BaseEngine.xml
def xml(self, value): """Set new XML string""" self._xml = value self._root = s2t(value)
python
def xml(self, value): """Set new XML string""" self._xml = value self._root = s2t(value)
[ "def", "xml", "(", "self", ",", "value", ")", ":", "self", ".", "_xml", "=", "value", "self", ".", "_root", "=", "s2t", "(", "value", ")" ]
Set new XML string
[ "Set", "new", "XML", "string" ]
train
https://github.com/backbohne/docx-xslt/blob/d4cc76776a75b8213660c3c1717d42afe5189e15/docxxslt/engines.py#L26-L30
backbohne/docx-xslt
docxxslt/engines.py
BaseEngine.root
def root(self, value): """Set new XML tree""" self._xml = t2s(value) self._root = value
python
def root(self, value): """Set new XML tree""" self._xml = t2s(value) self._root = value
[ "def", "root", "(", "self", ",", "value", ")", ":", "self", ".", "_xml", "=", "t2s", "(", "value", ")", "self", ".", "_root", "=", "value" ]
Set new XML tree
[ "Set", "new", "XML", "tree" ]
train
https://github.com/backbohne/docx-xslt/blob/d4cc76776a75b8213660c3c1717d42afe5189e15/docxxslt/engines.py#L39-L43
backbohne/docx-xslt
docxxslt/engines.py
XslEngine.xsl_elements
def xsl_elements(self): """Find all "XSL" styled runs, normalize related paragraph and returns list of XslElements""" def append_xsl_elements(xsl_elements, r, xsl): if r is not None: r.xpath('.//w:t', namespaces=self.namespaces)[0].text = xsl xe = XslElement(r, logger=self.logger) xsl_elements.append(xe) return None, '' if not getattr(self, '_xsl_elements', None): xsl_elements = [] for p in self.root.xpath('.//w:p', namespaces=self.namespaces): xsl_r, xsl = None, '' for r in p: # find first XSL run and add all XSL meta text text = ''.join(t.text for t in r.xpath('.//w:t', namespaces=self.namespaces)) if r.xpath('.//w:rPr/w:rStyle[@w:val="%s"]' % self.style, namespaces=self.namespaces): xsl += text if xsl_r is None and text: xsl_r = r else: r.getparent().remove(r) elif text: xsl_r, xsl = append_xsl_elements(xsl_elements, xsl_r, xsl) xsl_r, xsl = append_xsl_elements(xsl_elements, xsl_r, xsl) self._xsl_elements = xsl_elements return self._xsl_elements
python
def xsl_elements(self): """Find all "XSL" styled runs, normalize related paragraph and returns list of XslElements""" def append_xsl_elements(xsl_elements, r, xsl): if r is not None: r.xpath('.//w:t', namespaces=self.namespaces)[0].text = xsl xe = XslElement(r, logger=self.logger) xsl_elements.append(xe) return None, '' if not getattr(self, '_xsl_elements', None): xsl_elements = [] for p in self.root.xpath('.//w:p', namespaces=self.namespaces): xsl_r, xsl = None, '' for r in p: # find first XSL run and add all XSL meta text text = ''.join(t.text for t in r.xpath('.//w:t', namespaces=self.namespaces)) if r.xpath('.//w:rPr/w:rStyle[@w:val="%s"]' % self.style, namespaces=self.namespaces): xsl += text if xsl_r is None and text: xsl_r = r else: r.getparent().remove(r) elif text: xsl_r, xsl = append_xsl_elements(xsl_elements, xsl_r, xsl) xsl_r, xsl = append_xsl_elements(xsl_elements, xsl_r, xsl) self._xsl_elements = xsl_elements return self._xsl_elements
[ "def", "xsl_elements", "(", "self", ")", ":", "def", "append_xsl_elements", "(", "xsl_elements", ",", "r", ",", "xsl", ")", ":", "if", "r", "is", "not", "None", ":", "r", ".", "xpath", "(", "'.//w:t'", ",", "namespaces", "=", "self", ".", "namespaces",...
Find all "XSL" styled runs, normalize related paragraph and returns list of XslElements
[ "Find", "all", "XSL", "styled", "runs", "normalize", "related", "paragraph", "and", "returns", "list", "of", "XslElements" ]
train
https://github.com/backbohne/docx-xslt/blob/d4cc76776a75b8213660c3c1717d42afe5189e15/docxxslt/engines.py#L63-L91
backbohne/docx-xslt
docxxslt/engines.py
XslEngine.render_xsl
def render_xsl(self, node, context): """Render all XSL elements""" for e in self.xsl_elements: e.render(e.run)
python
def render_xsl(self, node, context): """Render all XSL elements""" for e in self.xsl_elements: e.render(e.run)
[ "def", "render_xsl", "(", "self", ",", "node", ",", "context", ")", ":", "for", "e", "in", "self", ".", "xsl_elements", ":", "e", ".", "render", "(", "e", ".", "run", ")" ]
Render all XSL elements
[ "Render", "all", "XSL", "elements" ]
train
https://github.com/backbohne/docx-xslt/blob/d4cc76776a75b8213660c3c1717d42afe5189e15/docxxslt/engines.py#L93-L97
backbohne/docx-xslt
docxxslt/engines.py
XslEngine.remove_style
def remove_style(self): """Remove all XSL run rStyle elements""" for n in self.root.xpath('.//w:rStyle[@w:val="%s"]' % self.style, namespaces=self.namespaces): n.getparent().remove(n)
python
def remove_style(self): """Remove all XSL run rStyle elements""" for n in self.root.xpath('.//w:rStyle[@w:val="%s"]' % self.style, namespaces=self.namespaces): n.getparent().remove(n)
[ "def", "remove_style", "(", "self", ")", ":", "for", "n", "in", "self", ".", "root", ".", "xpath", "(", "'.//w:rStyle[@w:val=\"%s\"]'", "%", "self", ".", "style", ",", "namespaces", "=", "self", ".", "namespaces", ")", ":", "n", ".", "getparent", "(", ...
Remove all XSL run rStyle elements
[ "Remove", "all", "XSL", "run", "rStyle", "elements" ]
train
https://github.com/backbohne/docx-xslt/blob/d4cc76776a75b8213660c3c1717d42afe5189e15/docxxslt/engines.py#L99-L103
backbohne/docx-xslt
docxxslt/engines.py
XslEngine.render
def render(self, xml, context, raise_on_errors=True): """Render xml string and apply XSLT transfomation with context""" if xml: self.xml = xml # render XSL self.render_xsl(self.root, context) # create root XSL sheet xsl_ns = self.namespaces['xsl'] rootName = etree.QName(xsl_ns, 'stylesheet') root = etree.Element(rootName, nsmap={'xsl': xsl_ns}) sheet = etree.ElementTree(root) template = etree.SubElement(root, etree.QName(xsl_ns, "template"), match='/') # put OpenOffice tree into XSLT sheet template.append(self.root) self.root = root # drop XSL styles self.remove_style() #self.debug(self.xml) try: # transform XSL xsl = etree.XSLT(self.root) self.root = xsl(context) except etree.Error as e: # log errors for l in e.error_log: self.error("XSLT error at line %s col %s:" % (l.line, l.column)) self.error(" message: %s" % l.message) self.error(" domain: %s (%d)" % (l.domain_name, l.domain)) self.error(' type: %s (%d)' % (l.type_name, l.type)) self.error(' level: %s (%d)' % (l.level_name, l.level)) self.error(' filename: %s' % l.filename) if raise_on_errors: raise return self.xml else: return xml
python
def render(self, xml, context, raise_on_errors=True): """Render xml string and apply XSLT transfomation with context""" if xml: self.xml = xml # render XSL self.render_xsl(self.root, context) # create root XSL sheet xsl_ns = self.namespaces['xsl'] rootName = etree.QName(xsl_ns, 'stylesheet') root = etree.Element(rootName, nsmap={'xsl': xsl_ns}) sheet = etree.ElementTree(root) template = etree.SubElement(root, etree.QName(xsl_ns, "template"), match='/') # put OpenOffice tree into XSLT sheet template.append(self.root) self.root = root # drop XSL styles self.remove_style() #self.debug(self.xml) try: # transform XSL xsl = etree.XSLT(self.root) self.root = xsl(context) except etree.Error as e: # log errors for l in e.error_log: self.error("XSLT error at line %s col %s:" % (l.line, l.column)) self.error(" message: %s" % l.message) self.error(" domain: %s (%d)" % (l.domain_name, l.domain)) self.error(' type: %s (%d)' % (l.type_name, l.type)) self.error(' level: %s (%d)' % (l.level_name, l.level)) self.error(' filename: %s' % l.filename) if raise_on_errors: raise return self.xml else: return xml
[ "def", "render", "(", "self", ",", "xml", ",", "context", ",", "raise_on_errors", "=", "True", ")", ":", "if", "xml", ":", "self", ".", "xml", "=", "xml", "# render XSL", "self", ".", "render_xsl", "(", "self", ".", "root", ",", "context", ")", "# cr...
Render xml string and apply XSLT transfomation with context
[ "Render", "xml", "string", "and", "apply", "XSLT", "transfomation", "with", "context" ]
train
https://github.com/backbohne/docx-xslt/blob/d4cc76776a75b8213660c3c1717d42afe5189e15/docxxslt/engines.py#L105-L151
ntucker/django-aloha-edit
aloha/decorators.py
handle_image_posts
def handle_image_posts(function=None): """ Decorator for views that handles ajax image posts in base64 encoding, saving the image and returning the url """ @wraps(function, assigned=available_attrs(function)) def _wrapped_view(request, *args, **kwargs): if 'image' in request.META['CONTENT_TYPE']: name = default_storage.save(os.path.join('images', 'aloha-uploads', request.META['HTTP_X_FILE_NAME']), ContentFile(base64.b64decode(request.body.split(",", 1)[1]))) return HttpResponse(posixpath.join(settings.MEDIA_URL, name), content_type="text/plain") else: return function(request, *args, **kwargs) return _wrapped_view
python
def handle_image_posts(function=None): """ Decorator for views that handles ajax image posts in base64 encoding, saving the image and returning the url """ @wraps(function, assigned=available_attrs(function)) def _wrapped_view(request, *args, **kwargs): if 'image' in request.META['CONTENT_TYPE']: name = default_storage.save(os.path.join('images', 'aloha-uploads', request.META['HTTP_X_FILE_NAME']), ContentFile(base64.b64decode(request.body.split(",", 1)[1]))) return HttpResponse(posixpath.join(settings.MEDIA_URL, name), content_type="text/plain") else: return function(request, *args, **kwargs) return _wrapped_view
[ "def", "handle_image_posts", "(", "function", "=", "None", ")", ":", "@", "wraps", "(", "function", ",", "assigned", "=", "available_attrs", "(", "function", ")", ")", "def", "_wrapped_view", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")"...
Decorator for views that handles ajax image posts in base64 encoding, saving the image and returning the url
[ "Decorator", "for", "views", "that", "handles", "ajax", "image", "posts", "in", "base64", "encoding", "saving", "the", "image", "and", "returning", "the", "url" ]
train
https://github.com/ntucker/django-aloha-edit/blob/199ced300e3acf63c58c659c6cd3559a35005b2c/aloha/decorators.py#L15-L28
xolox/python-verboselogs
verboselogs/pylint.py
verboselogs_class_transform
def verboselogs_class_transform(cls): """Make Pylint aware of our custom logger methods.""" if cls.name == 'RootLogger': for meth in ['notice', 'spam', 'success', 'verbose']: cls.locals[meth] = [scoped_nodes.Function(meth, None)]
python
def verboselogs_class_transform(cls): """Make Pylint aware of our custom logger methods.""" if cls.name == 'RootLogger': for meth in ['notice', 'spam', 'success', 'verbose']: cls.locals[meth] = [scoped_nodes.Function(meth, None)]
[ "def", "verboselogs_class_transform", "(", "cls", ")", ":", "if", "cls", ".", "name", "==", "'RootLogger'", ":", "for", "meth", "in", "[", "'notice'", ",", "'spam'", ",", "'success'", ",", "'verbose'", "]", ":", "cls", ".", "locals", "[", "meth", "]", ...
Make Pylint aware of our custom logger methods.
[ "Make", "Pylint", "aware", "of", "our", "custom", "logger", "methods", "." ]
train
https://github.com/xolox/python-verboselogs/blob/3cebc69e03588bb6c3726c38c324b12732989292/verboselogs/pylint.py#L20-L24
xolox/python-verboselogs
verboselogs/pylint.py
verboselogs_module_transform
def verboselogs_module_transform(mod): """Make Pylint aware of our custom log levels.""" if mod.name == 'logging': for const in ['NOTICE', 'SPAM', 'SUCCESS', 'VERBOSE']: mod.locals[const] = [nodes.Const(const)]
python
def verboselogs_module_transform(mod): """Make Pylint aware of our custom log levels.""" if mod.name == 'logging': for const in ['NOTICE', 'SPAM', 'SUCCESS', 'VERBOSE']: mod.locals[const] = [nodes.Const(const)]
[ "def", "verboselogs_module_transform", "(", "mod", ")", ":", "if", "mod", ".", "name", "==", "'logging'", ":", "for", "const", "in", "[", "'NOTICE'", ",", "'SPAM'", ",", "'SUCCESS'", ",", "'VERBOSE'", "]", ":", "mod", ".", "locals", "[", "const", "]", ...
Make Pylint aware of our custom log levels.
[ "Make", "Pylint", "aware", "of", "our", "custom", "log", "levels", "." ]
train
https://github.com/xolox/python-verboselogs/blob/3cebc69e03588bb6c3726c38c324b12732989292/verboselogs/pylint.py#L27-L31
mk-fg/feedjack
feedjack/views.py
cache_etag
def cache_etag(request, *argz, **kwz): '''Produce etag value for a cached page. Intended for usage in conditional views (@condition decorator).''' response, site, cachekey = kwz.get('_view_data') or initview(request) if not response: return None return fjcache.str2md5( '{0}--{1}--{2}'.format( site.id if site else 'x', cachekey, response[1].strftime('%Y-%m-%d %H:%M:%S%z') ) )
python
def cache_etag(request, *argz, **kwz): '''Produce etag value for a cached page. Intended for usage in conditional views (@condition decorator).''' response, site, cachekey = kwz.get('_view_data') or initview(request) if not response: return None return fjcache.str2md5( '{0}--{1}--{2}'.format( site.id if site else 'x', cachekey, response[1].strftime('%Y-%m-%d %H:%M:%S%z') ) )
[ "def", "cache_etag", "(", "request", ",", "*", "argz", ",", "*", "*", "kwz", ")", ":", "response", ",", "site", ",", "cachekey", "=", "kwz", ".", "get", "(", "'_view_data'", ")", "or", "initview", "(", "request", ")", "if", "not", "response", ":", ...
Produce etag value for a cached page. Intended for usage in conditional views (@condition decorator).
[ "Produce", "etag", "value", "for", "a", "cached", "page", ".", "Intended", "for", "usage", "in", "conditional", "views", "(" ]
train
https://github.com/mk-fg/feedjack/blob/3fe65c0f66dc2cfdf45834aaa7235ec9f81b3ca3/feedjack/views.py#L28-L35