repo
stringlengths
7
55
path
stringlengths
4
223
url
stringlengths
87
315
code
stringlengths
75
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
avg_line_len
float64
7.91
980
shoebot/shoebot
shoebot/diagnose.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/diagnose.py#L122-L150
def shoebot_example(**shoebot_kwargs):
    """
    Decorator to run some code in a bot instance.

    Any keyword arguments are forwarded to ``shoebot.create_bot`` so an
    example can configure its bot; previously they were accepted but
    silently ignored.

    :param shoebot_kwargs: extra keyword arguments for ``shoebot.create_bot``
        (must not include ``outputfile``, which is set automatically)
    :return: decorator wrapping the example function
    """
    def decorator(f):
        def run():
            # Imported here so a broken install is reported per-example:
            from shoebot import ShoebotInstallError  # https://github.com/shoebot/shoebot/issues/206
            print(" Shoebot - %s:" % f.__name__.replace("_", " "))
            try:
                import shoebot
                outputfile = "/tmp/shoebot-%s.png" % f.__name__
                # Forward the decorator kwargs so callers can configure the bot.
                bot = shoebot.create_bot(outputfile=outputfile, **shoebot_kwargs)
                f(bot)
                bot.finish()
                print(' [passed] : %s' % outputfile)
                print('')
            except ShoebotInstallError as e:
                print(' [failed]', e.args[0])
                print('')
            except Exception:
                # Any other failure: show the traceback, indented, and move on.
                print(' [failed] - traceback:')
                for line in traceback.format_exc().splitlines():
                    print(' %s' % line)
                print('')
        return run
    return decorator
[ "def", "shoebot_example", "(", "*", "*", "shoebot_kwargs", ")", ":", "def", "decorator", "(", "f", ")", ":", "def", "run", "(", ")", ":", "from", "shoebot", "import", "ShoebotInstallError", "# https://github.com/shoebot/shoebot/issues/206", "print", "(", "\" Sh...
Decorator to run some code in a bot instance.
[ "Decorator", "to", "run", "some", "code", "in", "a", "bot", "instance", "." ]
python
valid
34.068966
jhuapl-boss/intern
intern/service/boss/v1/project.py
https://github.com/jhuapl-boss/intern/blob/d8fc6df011d8f212c87e6a1fd4cc21cfb5d103ed/intern/service/boss/v1/project.py#L150-L177
def list_group_members(self, name, url_prefix, auth, session, send_opts):
    """Get the members of a group (does not include maintainers).

    Args:
        name (string): Name of group to query.
        url_prefix (string): Protocol + host such as https://api.theboss.io
        auth (string): Token to send in the request header.
        session (requests.Session): HTTP session to use for request.
        send_opts (dictionary): Additional arguments to pass to session.send().

    Returns:
        (list[string]): List of member names.

    Raises:
        requests.HTTPError on failure.
    """
    # Build, prepare and send the request against the group-members endpoint.
    request = self.get_group_members_request(
        'GET', 'application/json', url_prefix, auth, name)
    prepared = session.prepare_request(request)
    response = session.send(prepared, **send_opts)

    # Anything other than 200 is treated as a failure.
    if response.status_code != 200:
        msg = ('Failed getting members of group {}, got HTTP response: ({}) - {}'.format(
            name, response.status_code, response.text))
        raise HTTPError(msg, request=request, response=response)

    return response.json()['members']
[ "def", "list_group_members", "(", "self", ",", "name", ",", "url_prefix", ",", "auth", ",", "session", ",", "send_opts", ")", ":", "req", "=", "self", ".", "get_group_members_request", "(", "'GET'", ",", "'application/json'", ",", "url_prefix", ",", "auth", ...
Get the members of a group (does not include maintainers). Args: name (string): Name of group to query. url_prefix (string): Protocol + host such as https://api.theboss.io auth (string): Token to send in the request header. session (requests.Session): HTTP session to use for request. send_opts (dictionary): Additional arguments to pass to session.send(). Returns: (list[string]): List of member names. Raises: requests.HTTPError on failure.
[ "Get", "the", "members", "of", "a", "group", "(", "does", "not", "include", "maintainers", ")", "." ]
python
train
40.571429
foremast/foremast
src/foremast/awslambda/awslambda.py
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/awslambda/awslambda.py#L106-L116
def _vpc_config(self):
    """Build the Lambda VPC configuration dictionary.

    Returns a dict with ``SubnetIds`` and ``SecurityGroupIds`` keys; both
    are empty lists when VPC support is disabled.
    """
    # Lambda requires the config keys to be present even outside a VPC.
    subnet_ids = []
    security_group_ids = []
    if self.vpc_enabled:
        subnet_ids = get_subnets(
            env=self.env, region=self.region, purpose='internal')['subnet_ids'][self.region]
        security_group_ids = self._get_sg_ids()
    vpc_config = {'SubnetIds': subnet_ids, 'SecurityGroupIds': security_group_ids}
    LOG.debug("Lambda VPC config setup: %s", vpc_config)
    return vpc_config
[ "def", "_vpc_config", "(", "self", ")", ":", "if", "self", ".", "vpc_enabled", ":", "subnets", "=", "get_subnets", "(", "env", "=", "self", ".", "env", ",", "region", "=", "self", ".", "region", ",", "purpose", "=", "'internal'", ")", "[", "'subnet_ids...
Get VPC config.
[ "Get", "VPC", "config", "." ]
python
train
44.454545
eddiejessup/spatious
spatious/geom.py
https://github.com/eddiejessup/spatious/blob/b7ae91bec029e85a45a7f303ee184076433723cd/spatious/geom.py#L242-L257
def spheres_intersect(ar, aR, br, bR):
    """Return whether or not two spheres intersect each other.

    Parameters
    ----------
    ar, br: array-like, shape (n,) in n dimensions
        Coordinates of the centres of the spheres `a` and `b`.
    aR, bR: float
        Radiuses of the spheres `a` and `b`.

    Returns
    -------
    intersecting: boolean
        True if the spheres intersect.
    """
    # Compare squared centre separation with the squared radius sum;
    # avoids an unnecessary square root.
    separation_sq = vector.vector_mag_sq(ar - br)
    contact_sq = (aR + bR) ** 2
    return separation_sq < contact_sq
[ "def", "spheres_intersect", "(", "ar", ",", "aR", ",", "br", ",", "bR", ")", ":", "return", "vector", ".", "vector_mag_sq", "(", "ar", "-", "br", ")", "<", "(", "aR", "+", "bR", ")", "**", "2" ]
Return whether or not two spheres intersect each other. Parameters ---------- ar, br: array-like, shape (n,) in n dimensions Coordinates of the centres of the spheres `a` and `b`. aR, bR: float Radiuses of the spheres `a` and `b`. Returns ------- intersecting: boolean True if the spheres intersect.
[ "Return", "whether", "or", "not", "two", "spheres", "intersect", "each", "other", "." ]
python
train
28.125
GetmeUK/MongoFrames
mongoframes/frames.py
https://github.com/GetmeUK/MongoFrames/blob/7d2bd792235dfa77a9deecab5366f5f73480823d/mongoframes/frames.py#L566-L621
def _dereference(cls, documents, references): """Dereference one or more documents""" # Dereference each reference for path, projection in references.items(): # Check there is a $ref in the projection, else skip it if '$ref' not in projection: continue # Collect Ids of documents to dereference ids = set() for document in documents: value = cls._path_to_value(path, document) if not value: continue if isinstance(value, list): ids.update(value) elif isinstance(value, dict): ids.update(value.values()) else: ids.add(value) # Find the referenced documents ref = projection.pop('$ref') frames = ref.many( {'_id': {'$in': list(ids)}}, projection=projection ) frames = {f._id: f for f in frames} # Add dereferenced frames to the document for document in documents: value = cls._path_to_value(path, document) if not value: continue if isinstance(value, list): # List of references value = [frames[id] for id in value if id in frames] elif isinstance(value, dict): # Dictionary of references value = {key: frames.get(id) for key, id in value.items()} else: value = frames.get(value, None) child_document = document keys = cls._path_to_keys(path) for key in keys[:-1]: child_document = child_document[key] child_document[keys[-1]] = value
[ "def", "_dereference", "(", "cls", ",", "documents", ",", "references", ")", ":", "# Dereference each reference", "for", "path", ",", "projection", "in", "references", ".", "items", "(", ")", ":", "# Check there is a $ref in the projection, else skip it", "if", "'$ref...
Dereference one or more documents
[ "Dereference", "one", "or", "more", "documents" ]
python
train
33.089286
saltstack/salt
salt/proxy/ssh_sample.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/ssh_sample.py#L193-L205
def service_start(name):
    '''
    Start a "service" on the ssh server

    .. versionadded:: 2015.8.2
    '''
    # Send the start command to the remote shell; stderr is ignored
    # (matching the original behaviour) and stdout is parsed into a dict.
    out, err = DETAILS['server'].sendline('start ' + name)
    return parse(out)
[ "def", "service_start", "(", "name", ")", ":", "cmd", "=", "'start '", "+", "name", "# Send the command to execute", "out", ",", "err", "=", "DETAILS", "[", "'server'", "]", ".", "sendline", "(", "cmd", ")", "# \"scrape\" the output and return the right fields as a ...
Start a "service" on the ssh server .. versionadded:: 2015.8.2
[ "Start", "a", "service", "on", "the", "ssh", "server" ]
python
train
22.692308
CalebBell/fluids
fluids/geometry.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/geometry.py#L1130-L1189
def SA_torispheroidal(D, fd, fk):
    r'''Calculates surface area of a torispherical head according to [1]_.
    Equations are adapted to be used for a full head.

    .. math::
        SA = S_1 + S_2

    .. math::
        S_1 = 2\pi D^2 f_d \alpha_1

    .. math::
        S_2 = 2\pi D^2 f_k\left(\alpha_2 - \alpha_1 + (0.5 - f_k)\left(
        \sin^{-1}(0) - \sin^{-1}\left(\frac{\alpha_1-\alpha_2}{f_k}\right)
        \right)\right)

    .. math::
        \alpha_1 = f_d\left(1 - \sqrt{1 - \left(\frac{0.5 - f_k}{f_d-f_k}
        \right)^2}\right)

    .. math::
        \alpha_2 = f_d - \sqrt{f_d^2 - 2f_d f_k + f_k - 0.25}

    Parameters
    ----------
    D : float
        Diameter of the main cylindrical section, [m]
    fd : float
        Dish-radius parameter = f; fD  = dish radius [1/m]
    fk : float
        knuckle-radius parameter = k; kD = knuckle radius [1/m]

    Returns
    -------
    SA : float
        Surface area [m^2]

    Examples
    --------
    Example from [1]_.

    >>> SA_torispheroidal(D=2.54, fd=1.039370079, fk=0.062362205)
    6.00394283477063

    References
    ----------
    .. [1] Honeywell. "Calculate Surface Areas and Cross-sectional Areas in
       Vessels with Dished Heads".
       https://www.honeywellprocess.com/library/marketing/whitepapers/WP-VesselsWithDishedHeads-UniSimDesign.pdf
       Whitepaper. 2014.
    '''
    # alpha_1: dimensionless height at the top of the dome;
    # alpha_2: dimensionless height at the top of the torus.
    alpha_1 = fd*(1.0 - (1.0 - ((0.5 - fk)/(fd - fk))**2)**0.5)
    alpha_2 = fd - (fd**2 - 2.0*fd*fk + fk - 0.25)**0.5

    # Spherical (dish) contribution, evaluated at the dome height.
    S1 = 2.0*pi*D**2*fd*alpha_1

    # Toroidal (knuckle) contribution; asin(0) == 0 exactly, so the first
    # arcsine term of the published formula vanishes at alpha = alpha_2.
    arcsine_term = asin(0.0) - asin((alpha_1 - alpha_2)/fk)
    S2 = 2.0*pi*D**2*fk*(alpha_2 - alpha_1 + (0.5 - fk)*arcsine_term)

    return S1 + S2
[ "def", "SA_torispheroidal", "(", "D", ",", "fd", ",", "fk", ")", ":", "alpha_1", "=", "fd", "*", "(", "1", "-", "(", "1", "-", "(", "(", "0.5", "-", "fk", ")", "/", "(", "fd", "-", "fk", ")", ")", "**", "2", ")", "**", "0.5", ")", "alpha_...
r'''Calculates surface area of a torispherical head according to [1]_. Somewhat involved. Equations are adapted to be used for a full head. .. math:: SA = S_1 + S_2 .. math:: S_1 = 2\pi D^2 f_d \alpha .. math:: S_2 = 2\pi D^2 f_k\left(\alpha - \alpha_1 + (0.5 - f_k)\left(\sin^{-1} \left(\frac{\alpha-\alpha_2}{f_k}\right) - \sin^{-1}\left(\frac{ \alpha_1-\alpha_2}{f_k}\right)\right)\right) .. math:: \alpha_1 = f_d\left(1 - \sqrt{1 - \left(\frac{0.5 - f_k}{f_d-f_k} \right)^2}\right) .. math:: \alpha_2 = f_d - \sqrt{f_d^2 - 2f_d f_k + f_k - 0.25} .. math:: \alpha = \frac{a}{D_i} Parameters ---------- D : float Diameter of the main cylindrical section, [m] fd : float Dish-radius parameter = f; fD = dish radius [1/m] fk : float knuckle-radius parameter = k; kD = knuckle radius [1/m] Returns ------- SA : float Surface area [m^2] Examples -------- Example from [1]_. >>> SA_torispheroidal(D=2.54, fd=1.039370079, fk=0.062362205) 6.00394283477063 References ---------- .. [1] Honeywell. "Calculate Surface Areas and Cross-sectional Areas in Vessels with Dished Heads". https://www.honeywellprocess.com/library/marketing/whitepapers/WP-VesselsWithDishedHeads-UniSimDesign.pdf Whitepaper. 2014.
[ "r", "Calculates", "surface", "area", "of", "a", "torispherical", "head", "according", "to", "[", "1", "]", "_", ".", "Somewhat", "involved", ".", "Equations", "are", "adapted", "to", "be", "used", "for", "a", "full", "head", "." ]
python
train
29.5
RudolfCardinal/pythonlib
cardinal_pythonlib/dbfunc.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/dbfunc.py#L107-L122
def dictfetchall(cursor: Cursor) -> List[Dict[str, Any]]:
    """
    Return all remaining rows from a cursor as a list of
    :class:`OrderedDict` objects mapping column name to row value.

    Args:
        cursor: the cursor

    Returns:
        a list (one item per row) of :class:`OrderedDict` objects whose keys
        are column names and whose values are the row values
    """
    fieldnames = get_fieldnames_from_cursor(cursor)
    rows = cursor.fetchall()
    # Pair each row tuple with the column names, preserving column order.
    return [OrderedDict(zip(fieldnames, row)) for row in rows]
[ "def", "dictfetchall", "(", "cursor", ":", "Cursor", ")", "->", "List", "[", "Dict", "[", "str", ",", "Any", "]", "]", ":", "columns", "=", "get_fieldnames_from_cursor", "(", "cursor", ")", "return", "[", "OrderedDict", "(", "zip", "(", "columns", ",", ...
Return all rows from a cursor as a list of :class:`OrderedDict` objects. Args: cursor: the cursor Returns: a list (one item per row) of :class:`OrderedDict` objects whose key are column names and whose values are the row values
[ "Return", "all", "rows", "from", "a", "cursor", "as", "a", "list", "of", ":", "class", ":", "OrderedDict", "objects", "." ]
python
train
29.25
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/qt/console/console_widget.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/console_widget.py#L1522-L1537
def _get_word_end_cursor(self, position):
    """ Find the end of the word to the right the given position. If a
        sequence of non-word characters precedes the first word, skip over
        them. (This emulates the behavior of bash, emacs, etc.)
    """
    document = self._control.document()
    last = self._get_end_cursor().position()

    # First skip any run of non-word characters...
    while position < last:
        if is_letter_or_number(document.characterAt(position)):
            break
        position += 1
    # ...then consume the word itself.
    while position < last:
        if not is_letter_or_number(document.characterAt(position)):
            break
        position += 1

    cursor = self._control.textCursor()
    cursor.setPosition(position)
    return cursor
[ "def", "_get_word_end_cursor", "(", "self", ",", "position", ")", ":", "document", "=", "self", ".", "_control", ".", "document", "(", ")", "end", "=", "self", ".", "_get_end_cursor", "(", ")", ".", "position", "(", ")", "while", "position", "<", "end", ...
Find the end of the word to the right the given position. If a sequence of non-word characters precedes the first word, skip over them. (This emulates the behavior of bash, emacs, etc.)
[ "Find", "the", "end", "of", "the", "word", "to", "the", "right", "the", "given", "position", ".", "If", "a", "sequence", "of", "non", "-", "word", "characters", "precedes", "the", "first", "word", "skip", "over", "them", ".", "(", "This", "emulates", "...
python
test
45.1875
saltstack/salt
salt/proxy/esxcluster.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/esxcluster.py#L197-L257
def init(opts):
    '''
    This function gets called when the proxy starts up.
    For login the protocol and port are cached.

    Validates the ``proxy`` config (merged from opts and pillar) against
    the esxcluster schema, caches connection details in the module-level
    ``DETAILS`` dict, and for the ``userpass`` mechanism verifies that
    credentials can be retrieved.  Returns True on success, False when
    credential lookup fails.
    '''
    log.debug('Initting esxcluster proxy module in process %s', os.getpid())
    log.debug('Validating esxcluster proxy input')
    schema = EsxclusterProxySchema.serialize()
    log.trace('schema = %s', schema)
    # Pillar values take precedence over opts for the proxy config.
    proxy_conf = merge(opts.get('proxy', {}), __pillar__.get('proxy', {}))
    log.trace('proxy_conf = %s', proxy_conf)
    try:
        jsonschema.validate(proxy_conf, schema)
    except jsonschema.exceptions.ValidationError as exc:
        raise salt.exceptions.InvalidConfigError(exc)

    # Save mandatory fields in cache
    for key in ('vcenter', 'datacenter', 'cluster', 'mechanism'):
        DETAILS[key] = proxy_conf[key]

    # Additional validation: each mechanism requires its own pair of keys.
    if DETAILS['mechanism'] == 'userpass':
        if 'username' not in proxy_conf:
            raise salt.exceptions.InvalidConfigError(
                'Mechanism is set to \'userpass\', but no '
                '\'username\' key found in proxy config.')
        if 'passwords' not in proxy_conf:
            raise salt.exceptions.InvalidConfigError(
                'Mechanism is set to \'userpass\', but no '
                '\'passwords\' key found in proxy config.')
        for key in ('username', 'passwords'):
            DETAILS[key] = proxy_conf[key]
    else:
        # Any non-'userpass' mechanism is treated as sspi here.
        if 'domain' not in proxy_conf:
            raise salt.exceptions.InvalidConfigError(
                'Mechanism is set to \'sspi\', but no '
                '\'domain\' key found in proxy config.')
        if 'principal' not in proxy_conf:
            raise salt.exceptions.InvalidConfigError(
                'Mechanism is set to \'sspi\', but no '
                '\'principal\' key found in proxy config.')
        for key in ('domain', 'principal'):
            DETAILS[key] = proxy_conf[key]

    # Save optional (may be None when absent)
    DETAILS['protocol'] = proxy_conf.get('protocol')
    DETAILS['port'] = proxy_conf.get('port')

    # Test connection
    if DETAILS['mechanism'] == 'userpass':
        # Get the correct login details
        # NOTE(review): 'mehchanism' typo below is in a runtime log string,
        # left as-is to keep behavior identical.
        log.debug('Retrieving credentials and testing vCenter connection for '
                  'mehchanism \'userpass\'')
        try:
            username, password = find_credentials()
            DETAILS['password'] = password
        except salt.exceptions.SaltSystemExit as err:
            # Credential lookup failed: report and signal init failure.
            log.critical('Error: %s', err)
            return False
    return True
[ "def", "init", "(", "opts", ")", ":", "log", ".", "debug", "(", "'Initting esxcluster proxy module in process %s'", ",", "os", ".", "getpid", "(", ")", ")", "log", ".", "debug", "(", "'Validating esxcluster proxy input'", ")", "schema", "=", "EsxclusterProxySchema...
This function gets called when the proxy starts up. For login the protocol and port are cached.
[ "This", "function", "gets", "called", "when", "the", "proxy", "starts", "up", ".", "For", "login", "the", "protocol", "and", "port", "are", "cached", "." ]
python
train
39.721311
RudolfCardinal/pythonlib
cardinal_pythonlib/winservice.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/winservice.py#L375-L383
def fullname(self) -> str:
    """
    Description of the process, e.g. ``Process 1/4 (name) (PID=123)``.
    """
    description = "Process {}/{} ({})".format(self.procnum,
                                              self.nprocs,
                                              self.details.name)
    if not self.running:
        return description
    # Only a running process has a PID to report.
    return description + " (PID={})".format(self.process.pid)
[ "def", "fullname", "(", "self", ")", "->", "str", ":", "fullname", "=", "\"Process {}/{} ({})\"", ".", "format", "(", "self", ".", "procnum", ",", "self", ".", "nprocs", ",", "self", ".", "details", ".", "name", ")", "if", "self", ".", "running", ":", ...
Description of the process.
[ "Description", "of", "the", "process", "." ]
python
train
36.444444
JoelBender/bacpypes
py25/bacpypes/appservice.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/appservice.py#L1194-L1302
def confirmation(self, pdu):
    """Packets coming up the stack are APDU's.

    Applies the device-communication-control (DCC) filter, decodes the raw
    PDU into a typed APDU, then routes it to the matching client/server
    transaction state machine (or creates one for new confirmed requests).

    NOTE(review): the numeric apduType/apduService comparisons below
    presumably correspond to BACnet service codes (DCC, ReinitializeDevice,
    Who-Is) — confirm against the protocol constants.
    """
    if _debug: StateMachineAccessPoint._debug("confirmation %r", pdu)

    # check device communication control
    if self.dccEnableDisable == 'enable':
        if _debug: StateMachineAccessPoint._debug(" - communications enabled")
    elif self.dccEnableDisable == 'disable':
        # While disabled, only a short whitelist of services is processed.
        if (pdu.apduType == 0) and (pdu.apduService == 17):
            if _debug: StateMachineAccessPoint._debug(" - continue with DCC request")
        elif (pdu.apduType == 0) and (pdu.apduService == 20):
            if _debug: StateMachineAccessPoint._debug(" - continue with reinitialize device")
        elif (pdu.apduType == 1) and (pdu.apduService == 8):
            if _debug: StateMachineAccessPoint._debug(" - continue with Who-Is")
        else:
            # Everything else is silently dropped while disabled.
            if _debug: StateMachineAccessPoint._debug(" - not a Who-Is, dropped")
            return
    elif self.dccEnableDisable == 'disableInitiation':
        if _debug: StateMachineAccessPoint._debug(" - initiation disabled")

    # make a more focused interpretation
    atype = apdu_types.get(pdu.apduType)
    if not atype:
        StateMachineAccessPoint._warning(" - unknown apduType: %r", pdu.apduType)
        return

    # decode it
    apdu = atype()
    apdu.decode(pdu)
    if _debug: StateMachineAccessPoint._debug(" - apdu: %r", apdu)

    if isinstance(apdu, ConfirmedRequestPDU):
        # find duplicates of this request (same invoke id and source);
        # for-else: the else branch runs only when no match was found.
        for tr in self.serverTransactions:
            if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
                break
        else:
            # build a server transaction
            tr = ServerSSM(self, apdu.pduSource)

            # add it to our transactions to track it
            self.serverTransactions.append(tr)

        # let it run with the apdu
        tr.indication(apdu)

    elif isinstance(apdu, UnconfirmedRequestPDU):
        # deliver directly to the application
        self.sap_request(apdu)

    elif isinstance(apdu, SimpleAckPDU) \
            or isinstance(apdu, ComplexAckPDU) \
            or isinstance(apdu, ErrorPDU) \
            or isinstance(apdu, RejectPDU):
        # find the client transaction this is acking
        for tr in self.clientTransactions:
            if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
                break
        else:
            # no matching transaction: drop the ack silently
            return

        # send the packet on to the transaction
        tr.confirmation(apdu)

    elif isinstance(apdu, AbortPDU):
        # find the transaction being aborted; apduSrv distinguishes
        # client-side from server-side transactions.
        if apdu.apduSrv:
            for tr in self.clientTransactions:
                if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
                    break
            else:
                return

            # send the packet on to the transaction
            tr.confirmation(apdu)
        else:
            for tr in self.serverTransactions:
                if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
                    break
            else:
                return

            # send the packet on to the transaction
            tr.indication(apdu)

    elif isinstance(apdu, SegmentAckPDU):
        # find the transaction being acked, client- or server-side
        if apdu.apduSrv:
            for tr in self.clientTransactions:
                if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
                    break
            else:
                return

            # send the packet on to the transaction
            tr.confirmation(apdu)
        else:
            for tr in self.serverTransactions:
                if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
                    break
            else:
                return

            # send the packet on to the transaction
            tr.indication(apdu)

    else:
        raise RuntimeError("invalid APDU (8)")
[ "def", "confirmation", "(", "self", ",", "pdu", ")", ":", "if", "_debug", ":", "StateMachineAccessPoint", ".", "_debug", "(", "\"confirmation %r\"", ",", "pdu", ")", "# check device communication control", "if", "self", ".", "dccEnableDisable", "==", "'enable'", "...
Packets coming up the stack are APDU's.
[ "Packets", "coming", "up", "the", "stack", "are", "APDU", "s", "." ]
python
train
39.091743
ronhanson/python-tbx
tbx/text.py
https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/text.py#L158-L169
def seconds_to_hms_verbose(t):
    """
    Converts a duration in seconds to a verbose string such as
    '1 hour 8 minutes 30 seconds'.

    Components that are zero are omitted; an input below one second
    yields an empty string.

    :param t: duration in seconds (int or float)
    :return: verbose human-readable duration string
    """
    # BUG FIX: the original concatenated ints to str ('hours + " hour"'),
    # raising TypeError for any non-zero component; format via %d instead.
    hours = int(t / 3600)
    mins = int((t / 60) % 60)
    secs = int(t % 60)
    parts = [
        ('%d hour%s' % (hours, 's' if hours > 1 else '')) if hours > 0 else '',
        ('%d minute%s' % (mins, 's' if mins > 1 else '')) if mins > 0 else '',
        ('%d second%s' % (secs, 's' if secs > 1 else '')) if secs > 0 else '',
    ]
    # Join only non-empty components so there are no stray spaces.
    return ' '.join(part for part in parts if part)
[ "def", "seconds_to_hms_verbose", "(", "t", ")", ":", "hours", "=", "int", "(", "(", "t", "/", "3600", ")", ")", "mins", "=", "int", "(", "(", "t", "/", "60", ")", "%", "60", ")", "secs", "=", "int", "(", "t", "%", "60", ")", "return", "' '", ...
Converts seconds float to 'H hours 8 minutes, 30 seconds' format
[ "Converts", "seconds", "float", "to", "H", "hours", "8", "minutes", "30", "seconds", "format" ]
python
train
36.833333
epio/mantrid
mantrid/actions.py
https://github.com/epio/mantrid/blob/1c699f1a4b33888b533c19cb6d025173f2160576/mantrid/actions.py#L178-L189
def handle(self, sock, read_data, path, headers):
    "Just waits, and checks for other actions to replace us"
    attempts = self.timeout // self.check_interval
    for _ in range(attempts):
        # Sleep first, then see whether the balancer now resolves the
        # host to something other than a Spin action.
        eventlet.sleep(self.check_interval)
        replacement = self.balancer.resolve_host(self.host)
        if not isinstance(replacement, Spin):
            return replacement.handle(sock, read_data, path, headers)
    # Timed out with no replacement; serve the static timeout response.
    fallback = Static(self.balancer, self.host, self.matched_host, type="timeout")
    return fallback.handle(sock, read_data, path, headers)
[ "def", "handle", "(", "self", ",", "sock", ",", "read_data", ",", "path", ",", "headers", ")", ":", "for", "i", "in", "range", "(", "self", ".", "timeout", "//", "self", ".", "check_interval", ")", ":", "# Sleep first", "eventlet", ".", "sleep", "(", ...
Just waits, and checks for other actions to replace us
[ "Just", "waits", "and", "checks", "for", "other", "actions", "to", "replace", "us" ]
python
train
53.333333
apache/spark
python/pyspark/sql/functions.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L565-L575
def nanvl(col1, col2):
    """Returns col1 if it is not NaN, or col2 if col1 is NaN.

    Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`).

    >>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
    >>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect()
    [Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)]
    """
    # Delegate to the JVM-side implementation of nanvl.
    sc = SparkContext._active_spark_context
    jvm_column = sc._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2))
    return Column(jvm_column)
[ "def", "nanvl", "(", "col1", ",", "col2", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "nanvl", "(", "_to_java_column", "(", "col1", ")", ",", "_to_java_column", "(", ...
Returns col1 if it is not NaN, or col2 if col1 is NaN. Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`). >>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b")) >>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect() [Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)]
[ "Returns", "col1", "if", "it", "is", "not", "NaN", "or", "col2", "if", "col1", "is", "NaN", "." ]
python
train
48.818182
supercoderz/pyflightdata
pyflightdata/flightdata.py
https://github.com/supercoderz/pyflightdata/blob/2caf9f429288f9a171893d1b8377d0c6244541cc/pyflightdata/flightdata.py#L135-L160
def get_info_by_tail_number(self, tail_number, page=1, limit=100):
    """Fetch the details of a particular aircraft by its tail number.

    This method can be used to get the details of a particular aircraft by
    its tail number. Details include the serial number, age etc along with
    links to the images of the aircraft. It checks the user authentication
    and returns the data accordingly.

    Args:
        tail_number (str): The tail number, e.g. VT-ANL
        page (int): Optional page number; for users who are on a plan with
            flightradar24 they can pass in higher page numbers to get more data
        limit (int): Optional limit on number of records returned

    Returns:
        A list of dicts with the data; one dict for each row of data from
        flightradar24

    Example::

        from pyflightdata import FlightData
        f=FlightData()
        #optional login
        f.login(myemail,mypassword)
        f.get_info_by_tail_number('VT-ANL')
        f.get_info_by_tail_number('VT-ANL',page=1,limit=10)
    """
    # Auth token is embedded in the URL; pagination is server-side.
    url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit)
    return self._fr24.get_aircraft_data(url)
[ "def", "get_info_by_tail_number", "(", "self", ",", "tail_number", ",", "page", "=", "1", ",", "limit", "=", "100", ")", ":", "url", "=", "REG_BASE", ".", "format", "(", "tail_number", ",", "str", "(", "self", ".", "AUTH_TOKEN", ")", ",", "page", ",", ...
Fetch the details of a particular aircraft by its tail number. This method can be used to get the details of a particular aircraft by its tail number. Details include the serial number, age etc along with links to the images of the aircraft. It checks the user authentication and returns the data accordingly. Args: tail_number (str): The tail number, e.g. VT-ANL page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data limit (int): Optional limit on number of records returned Returns: A list of dicts with the data; one dict for each row of data from flightradar24 Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_info_by_flight_number('VT-ANL') f.get_info_by_flight_number('VT-ANL',page=1,limit=10)
[ "Fetch", "the", "details", "of", "a", "particular", "aircraft", "by", "its", "tail", "number", "." ]
python
train
46.423077
awslabs/sockeye
sockeye/inference.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/inference.py#L1210-L1225
def _expand_nbest_translation(translation: Translation) -> List[Translation]:
    """
    Expand nbest translations in a single Translation object to one Translation object
    per nbest translation.

    :param translation: A Translation object.
    :return: A list of Translation objects.
    """
    nbest = translation.nbest_translations
    # Each (target ids, attention matrix, score) triple becomes its own
    # Translation; beam histories and reference length are shared.
    return [Translation(target_ids,
                        attention_matrix,
                        score,
                        translation.beam_histories,
                        estimated_reference_length=translation.estimated_reference_length)
            for target_ids, attention_matrix, score in zip(nbest.target_ids_list,
                                                           nbest.attention_matrices,
                                                           nbest.scores)]
[ "def", "_expand_nbest_translation", "(", "translation", ":", "Translation", ")", "->", "List", "[", "Translation", "]", ":", "nbest_list", "=", "[", "]", "# type = List[Translation]", "for", "target_ids", ",", "attention_matrix", ",", "score", "in", "zip", "(", ...
Expand nbest translations in a single Translation object to one Translation object per nbest translation. :param translation: A Translation object. :return: A list of Translation objects.
[ "Expand", "nbest", "translations", "in", "a", "single", "Translation", "object", "to", "one", "Translation", "object", "per", "nbest", "translation", "." ]
python
train
53.6875
tensorflow/cleverhans
cleverhans/utils.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils.py#L247-L262
def create_logger(name):
    """
    Create a logger object with the given name.

    If this is the first time that we call this method, then initialize the
    formatter.

    NOTE(review): the returned logger is always the shared "cleverhans"
    base logger; `name` is currently unused — confirm whether per-name
    child loggers were intended.
    """
    base = logging.getLogger("cleverhans")
    if base.handlers:
        return base
    # First call: attach a stream handler with the project's log format.
    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter('[%(levelname)s %(asctime)s %(name)s] %(message)s'))
    base.addHandler(handler)
    return base
[ "def", "create_logger", "(", "name", ")", ":", "base", "=", "logging", ".", "getLogger", "(", "\"cleverhans\"", ")", "if", "len", "(", "base", ".", "handlers", ")", "==", "0", ":", "ch", "=", "logging", ".", "StreamHandler", "(", ")", "formatter", "=",...
Create a logger object with the given name. If this is the first time that we call this method, then initialize the formatter.
[ "Create", "a", "logger", "object", "with", "the", "given", "name", "." ]
python
train
28.375
senaite/senaite.core
bika/lims/browser/analyses/view.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/analyses/view.py#L923-L946
def _folder_item_uncertainty(self, analysis_brain, item):
    """Fills the analysis' uncertainty to the item passed in.

    :param analysis_brain: Brain that represents an analysis
    :param item: analysis' dictionary counterpart that represents a row
    """
    item["Uncertainty"] = ""

    # Without result-view permission the cell stays empty.
    if not self.has_permission(ViewResults, analysis_brain):
        return

    result = analysis_brain.getResult
    obj = self.get_object(analysis_brain)
    formatted = format_uncertainty(
        obj, result, decimalmark=self.dmk, sciformat=int(self.scinot))
    # Prefer the formatted value; fall back to the raw uncertainty.
    item["Uncertainty"] = formatted if formatted else obj.getUncertainty(result)

    if self.is_uncertainty_edition_allowed(analysis_brain):
        item["allow_edit"].append("Uncertainty")
[ "def", "_folder_item_uncertainty", "(", "self", ",", "analysis_brain", ",", "item", ")", ":", "item", "[", "\"Uncertainty\"", "]", "=", "\"\"", "if", "not", "self", ".", "has_permission", "(", "ViewResults", ",", "analysis_brain", ")", ":", "return", "result",...
Fills the analysis' uncertainty to the item passed in. :param analysis_brain: Brain that represents an analysis :param item: analysis' dictionary counterpart that represents a row
[ "Fills", "the", "analysis", "uncertainty", "to", "the", "item", "passed", "in", "." ]
python
train
35.958333
IdentityPython/pysaml2
src/saml2/mdstore.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/mdstore.py#L747-L758
def load(self, *args, **kwargs): """ Imports metadata by the use of HTTP GET. If the fingerprint is known the file will be checked for compliance before it is imported. """ response = self.http.send(self.url) if response.status_code == 200: _txt = response.content return self.parse_and_check_signature(_txt) else: logger.info("Response status: %s", response.status_code) raise SourceNotFound(self.url)
[ "def", "load", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "response", "=", "self", ".", "http", ".", "send", "(", "self", ".", "url", ")", "if", "response", ".", "status_code", "==", "200", ":", "_txt", "=", "response", "."...
Imports metadata by the use of HTTP GET. If the fingerprint is known the file will be checked for compliance before it is imported.
[ "Imports", "metadata", "by", "the", "use", "of", "HTTP", "GET", ".", "If", "the", "fingerprint", "is", "known", "the", "file", "will", "be", "checked", "for", "compliance", "before", "it", "is", "imported", "." ]
python
train
41.25
saltstack/salt
salt/utils/smb.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/smb.py#L150-L157
def string(self, writesize=None): ''' Looks like a file handle ''' if not self.finished: self.finished = True return self.content return ''
[ "def", "string", "(", "self", ",", "writesize", "=", "None", ")", ":", "if", "not", "self", ".", "finished", ":", "self", ".", "finished", "=", "True", "return", "self", ".", "content", "return", "''" ]
Looks like a file handle
[ "Looks", "like", "a", "file", "handle" ]
python
train
24.5
insightindustry/validator-collection
validator_collection/checkers.py
https://github.com/insightindustry/validator-collection/blob/8c8047a0fa36cc88a021771279898278c4cc98e3/validator_collection/checkers.py#L89-L148
def are_equivalent(*args, **kwargs): """Indicate if arguments passed to this function are equivalent. .. hint:: This checker operates recursively on the members contained within iterables and :class:`dict <python:dict>` objects. .. caution:: If you only pass one argument to this checker - even if it is an iterable - the checker will *always* return ``True``. To evaluate members of an iterable for equivalence, you should instead unpack the iterable into the function like so: .. code-block:: python obj = [1, 1, 1, 2] result = are_equivalent(*obj) # Will return ``False`` by unpacking and evaluating the iterable's members result = are_equivalent(obj) # Will always return True :param args: One or more values, passed as positional arguments. :returns: ``True`` if ``args`` are equivalent, and ``False`` if not. :rtype: :class:`bool <python:bool>` :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates keyword parameters passed to the underlying validator """ if len(args) == 1: return True first_item = args[0] for item in args[1:]: if type(item) != type(first_item): # pylint: disable=C0123 return False if isinstance(item, dict): if not are_dicts_equivalent(item, first_item): return False elif hasattr(item, '__iter__') and not isinstance(item, (str, bytes, dict)): if len(item) != len(first_item): return False for value in item: if value not in first_item: return False for value in first_item: if value not in item: return False else: if item != first_item: return False return True
[ "def", "are_equivalent", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "args", ")", "==", "1", ":", "return", "True", "first_item", "=", "args", "[", "0", "]", "for", "item", "in", "args", "[", "1", ":", "]", ":", "if",...
Indicate if arguments passed to this function are equivalent. .. hint:: This checker operates recursively on the members contained within iterables and :class:`dict <python:dict>` objects. .. caution:: If you only pass one argument to this checker - even if it is an iterable - the checker will *always* return ``True``. To evaluate members of an iterable for equivalence, you should instead unpack the iterable into the function like so: .. code-block:: python obj = [1, 1, 1, 2] result = are_equivalent(*obj) # Will return ``False`` by unpacking and evaluating the iterable's members result = are_equivalent(obj) # Will always return True :param args: One or more values, passed as positional arguments. :returns: ``True`` if ``args`` are equivalent, and ``False`` if not. :rtype: :class:`bool <python:bool>` :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates keyword parameters passed to the underlying validator
[ "Indicate", "if", "arguments", "passed", "to", "this", "function", "are", "equivalent", "." ]
python
train
31.45
ankitmathur3193/song-cli
song/commands/MusicWebsiteParser/MrJattParser.py
https://github.com/ankitmathur3193/song-cli/blob/ca8ccfe547e9d702313ff6d14e81ae4355989a67/song/commands/MusicWebsiteParser/MrJattParser.py#L47-L76
def check_if_song_name(self,html): ''' Returns true if user entered artist or movie name ''' soup=BeautifulSoup(html) a_list=soup.findAll('a','touch') #print a_list text=[str(x) for x in a_list] text=''.join(text) text=text.lower() string1='download in 48 kbps' string2='download in 128 kbps' string3='download in 320 kbps' href='' if string3 in text: #print 'Downloading in 320 kbps' href=a_list[2].get('href') elif string2 in text: #print 'Downloading in 128 kbps' href=a_list[1].get('href') elif string1 in text: #print 'Downloading in 48 kbps' href=a_list[0].get('href') else: return (True,'nothing') return (False,href)
[ "def", "check_if_song_name", "(", "self", ",", "html", ")", ":", "soup", "=", "BeautifulSoup", "(", "html", ")", "a_list", "=", "soup", ".", "findAll", "(", "'a'", ",", "'touch'", ")", "#print a_list", "text", "=", "[", "str", "(", "x", ")", "for", "...
Returns true if user entered artist or movie name
[ "Returns", "true", "if", "user", "entered", "artist", "or", "movie", "name" ]
python
test
22.166667
herrjemand/flask-fido-u2f
flask_fido_u2f.py
https://github.com/herrjemand/flask-fido-u2f/blob/23acac4cfe285a33411e8a6bf980b3c345b04feb/flask_fido_u2f.py#L247-L255
def get_enroll(self): """Returns new enroll seed""" devices = [DeviceRegistration.wrap(device) for device in self.__get_u2f_devices()] enroll = start_register(self.__appid, devices) enroll['status'] = 'ok' session['_u2f_enroll_'] = enroll.json return enroll
[ "def", "get_enroll", "(", "self", ")", ":", "devices", "=", "[", "DeviceRegistration", ".", "wrap", "(", "device", ")", "for", "device", "in", "self", ".", "__get_u2f_devices", "(", ")", "]", "enroll", "=", "start_register", "(", "self", ".", "__appid", ...
Returns new enroll seed
[ "Returns", "new", "enroll", "seed" ]
python
train
33.333333
MacHu-GWU/uszipcode-project
uszipcode/pkg/sqlalchemy_mate/engine_creator.py
https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/engine_creator.py#L210-L217
def create_mysql_oursql(username, password, host, port, database, **kwargs): # pragma: no cover """ create an engine connected to a mysql database using oursql. """ return create_engine( _create_mysql_oursql(username, password, host, port, database), **kwargs )
[ "def", "create_mysql_oursql", "(", "username", ",", "password", ",", "host", ",", "port", ",", "database", ",", "*", "*", "kwargs", ")", ":", "# pragma: no cover", "return", "create_engine", "(", "_create_mysql_oursql", "(", "username", ",", "password", ",", "...
create an engine connected to a mysql database using oursql.
[ "create", "an", "engine", "connected", "to", "a", "mysql", "database", "using", "oursql", "." ]
python
train
36.375
samuelcolvin/pydantic
pydantic/schema.py
https://github.com/samuelcolvin/pydantic/blob/bff8a1789dfde2c38928cced6640887b53615aa3/pydantic/schema.py#L216-L268
def field_schema( field: Field, *, by_alias: bool = True, model_name_map: Dict[Type['main.BaseModel'], str], ref_prefix: Optional[str] = None, ) -> Tuple[Dict[str, Any], Dict[str, Any]]: """ Process a Pydantic field and return a tuple with a JSON Schema for it as the first item. Also return a dictionary of definitions with models as keys and their schemas as values. If the passed field is a model and has sub-models, and those sub-models don't have overrides (as ``title``, ``default``, etc), they will be included in the definitions and referenced in the schema instead of included recursively. :param field: a Pydantic ``Field`` :param by_alias: use the defined alias (if any) in the returned schema :param model_name_map: used to generate the JSON Schema references to other models included in the definitions :param ref_prefix: the JSON Pointer prefix to use for references to other schemas, if None, the default of #/definitions/ will be used :return: tuple of the schema for this field and additional definitions """ ref_prefix = ref_prefix or default_prefix schema_overrides = False schema = cast('Schema', field.schema) s = dict(title=schema.title or field.alias.title()) if schema.title: schema_overrides = True if schema.description: s['description'] = schema.description schema_overrides = True if not field.required and field.default is not None: s['default'] = encode_default(field.default) schema_overrides = True validation_schema = get_field_schema_validations(field) if validation_schema: s.update(validation_schema) schema_overrides = True f_schema, f_definitions = field_type_schema( field, by_alias=by_alias, model_name_map=model_name_map, schema_overrides=schema_overrides, ref_prefix=ref_prefix, ) # $ref will only be returned when there are no schema_overrides if '$ref' in f_schema: return f_schema, f_definitions else: s.update(f_schema) return s, f_definitions
[ "def", "field_schema", "(", "field", ":", "Field", ",", "*", ",", "by_alias", ":", "bool", "=", "True", ",", "model_name_map", ":", "Dict", "[", "Type", "[", "'main.BaseModel'", "]", ",", "str", "]", ",", "ref_prefix", ":", "Optional", "[", "str", "]",...
Process a Pydantic field and return a tuple with a JSON Schema for it as the first item. Also return a dictionary of definitions with models as keys and their schemas as values. If the passed field is a model and has sub-models, and those sub-models don't have overrides (as ``title``, ``default``, etc), they will be included in the definitions and referenced in the schema instead of included recursively. :param field: a Pydantic ``Field`` :param by_alias: use the defined alias (if any) in the returned schema :param model_name_map: used to generate the JSON Schema references to other models included in the definitions :param ref_prefix: the JSON Pointer prefix to use for references to other schemas, if None, the default of #/definitions/ will be used :return: tuple of the schema for this field and additional definitions
[ "Process", "a", "Pydantic", "field", "and", "return", "a", "tuple", "with", "a", "JSON", "Schema", "for", "it", "as", "the", "first", "item", ".", "Also", "return", "a", "dictionary", "of", "definitions", "with", "models", "as", "keys", "and", "their", "...
python
train
39.396226
Yelp/kafka-utils
kafka_utils/kafka_consumer_manager/commands/offset_save.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_consumer_manager/commands/offset_save.py#L142-L151
def write_offsets_to_file(cls, json_file_name, consumer_offsets_data): """Save built consumer-offsets data to given json file.""" # Save consumer-offsets to file with open(json_file_name, "w") as json_file: try: json.dump(consumer_offsets_data, json_file) except ValueError: print("Error: Invalid json data {data}".format(data=consumer_offsets_data)) raise print("Consumer offset data saved in json-file {file}".format(file=json_file_name))
[ "def", "write_offsets_to_file", "(", "cls", ",", "json_file_name", ",", "consumer_offsets_data", ")", ":", "# Save consumer-offsets to file", "with", "open", "(", "json_file_name", ",", "\"w\"", ")", "as", "json_file", ":", "try", ":", "json", ".", "dump", "(", ...
Save built consumer-offsets data to given json file.
[ "Save", "built", "consumer", "-", "offsets", "data", "to", "given", "json", "file", "." ]
python
train
53.9
waqasbhatti/astrobase
astrobase/fakelcs/recovery.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/fakelcs/recovery.py#L1454-L1574
def run_periodfinding(simbasedir, pfmethods=('gls','pdm','bls'), pfkwargs=({},{},{'startp':1.0,'maxtransitduration':0.3}), getblssnr=False, sigclip=5.0, nperiodworkers=10, ncontrolworkers=4, liststartindex=None, listmaxobjects=None): '''This runs periodfinding using several period-finders on a collection of fake LCs. As a rough benchmark, 25000 fake LCs with 10000--50000 points per LC take about 26 days in total to run on an invocation of this function using GLS+PDM+BLS and 10 periodworkers and 4 controlworkers (so all 40 'cores') on a 2 x Xeon E5-2660v3 machine. Parameters ---------- pfmethods : sequence of str This is used to specify which periodfinders to run. These must be in the `lcproc.periodsearch.PFMETHODS` dict. pfkwargs : sequence of dict This is used to provide optional kwargs to the period-finders. getblssnr : bool If this is True, will run BLS SNR calculations for each object and magcol. This takes a while to run, so it's disabled (False) by default. sigclip : float or int or sequence of two floats/ints or None If a single float or int, a symmetric sigma-clip will be performed using the number provided as the sigma-multiplier to cut out from the input time-series. If a list of two ints/floats is provided, the function will perform an 'asymmetric' sigma-clip. The first element in this list is the sigma value to use for fainter flux/mag values; the second element in this list is the sigma value to use for brighter flux/mag values. For example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma dimmings and greater than 3-sigma brightenings. Here the meaning of "dimming" and "brightening" is set by *physics* (not the magnitude system), which is why the `magsarefluxes` kwarg must be correctly set. If `sigclip` is None, no sigma-clipping will be performed, and the time-series (with non-finite elems removed) will be passed through to the output. nperiodworkers : int This is the number of parallel period-finding worker processes to use. 
ncontrolworkers : int This is the number of parallel period-finding control workers to use. Each control worker will launch `nperiodworkers` worker processes. liststartindex : int The starting index of processing. This refers to the filename list generated by running `glob.glob` on the fake LCs in `simbasedir`. maxobjects : int The maximum number of objects to process in this run. Use this with `liststartindex` to effectively distribute working on a large list of input light curves over several sessions or machines. Returns ------- str The path to the output summary pickle produced by `lcproc.periodsearch.parallel_pf` ''' # get the info from the simbasedir with open(os.path.join(simbasedir, 'fakelcs-info.pkl'),'rb') as infd: siminfo = pickle.load(infd) lcfpaths = siminfo['lcfpath'] pfdir = os.path.join(simbasedir,'periodfinding') # get the column defs for the fakelcs timecols = siminfo['timecols'] magcols = siminfo['magcols'] errcols = siminfo['errcols'] # register the fakelc pklc as a custom lcproc format # now we should be able to use all lcproc functions correctly fakelc_formatkey = 'fake-%s' % siminfo['lcformat'] lcproc.register_lcformat( fakelc_formatkey, '*-fakelc.pkl', timecols, magcols, errcols, 'astrobase.lcproc', '_read_pklc', magsarefluxes=siminfo['magsarefluxes'] ) if liststartindex: lcfpaths = lcfpaths[liststartindex:] if listmaxobjects: lcfpaths = lcfpaths[:listmaxobjects] pfinfo = periodsearch.parallel_pf(lcfpaths, pfdir, lcformat=fakelc_formatkey, pfmethods=pfmethods, pfkwargs=pfkwargs, getblssnr=getblssnr, sigclip=sigclip, nperiodworkers=nperiodworkers, ncontrolworkers=ncontrolworkers) with open(os.path.join(simbasedir, 'fakelc-periodsearch.pkl'),'wb') as outfd: pickle.dump(pfinfo, outfd, pickle.HIGHEST_PROTOCOL) return os.path.join(simbasedir,'fakelc-periodsearch.pkl')
[ "def", "run_periodfinding", "(", "simbasedir", ",", "pfmethods", "=", "(", "'gls'", ",", "'pdm'", ",", "'bls'", ")", ",", "pfkwargs", "=", "(", "{", "}", ",", "{", "}", ",", "{", "'startp'", ":", "1.0", ",", "'maxtransitduration'", ":", "0.3", "}", "...
This runs periodfinding using several period-finders on a collection of fake LCs. As a rough benchmark, 25000 fake LCs with 10000--50000 points per LC take about 26 days in total to run on an invocation of this function using GLS+PDM+BLS and 10 periodworkers and 4 controlworkers (so all 40 'cores') on a 2 x Xeon E5-2660v3 machine. Parameters ---------- pfmethods : sequence of str This is used to specify which periodfinders to run. These must be in the `lcproc.periodsearch.PFMETHODS` dict. pfkwargs : sequence of dict This is used to provide optional kwargs to the period-finders. getblssnr : bool If this is True, will run BLS SNR calculations for each object and magcol. This takes a while to run, so it's disabled (False) by default. sigclip : float or int or sequence of two floats/ints or None If a single float or int, a symmetric sigma-clip will be performed using the number provided as the sigma-multiplier to cut out from the input time-series. If a list of two ints/floats is provided, the function will perform an 'asymmetric' sigma-clip. The first element in this list is the sigma value to use for fainter flux/mag values; the second element in this list is the sigma value to use for brighter flux/mag values. For example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma dimmings and greater than 3-sigma brightenings. Here the meaning of "dimming" and "brightening" is set by *physics* (not the magnitude system), which is why the `magsarefluxes` kwarg must be correctly set. If `sigclip` is None, no sigma-clipping will be performed, and the time-series (with non-finite elems removed) will be passed through to the output. nperiodworkers : int This is the number of parallel period-finding worker processes to use. ncontrolworkers : int This is the number of parallel period-finding control workers to use. Each control worker will launch `nperiodworkers` worker processes. liststartindex : int The starting index of processing. 
This refers to the filename list generated by running `glob.glob` on the fake LCs in `simbasedir`. maxobjects : int The maximum number of objects to process in this run. Use this with `liststartindex` to effectively distribute working on a large list of input light curves over several sessions or machines. Returns ------- str The path to the output summary pickle produced by `lcproc.periodsearch.parallel_pf`
[ "This", "runs", "periodfinding", "using", "several", "period", "-", "finders", "on", "a", "collection", "of", "fake", "LCs", "." ]
python
valid
38.801653
yougov/pmxbot
pmxbot/commands.py
https://github.com/yougov/pmxbot/blob/5da84a3258a0fd73cb35b60e39769a5d7bfb2ba7/pmxbot/commands.py#L847-L852
def version(rest): "Get the version of pmxbot or one of its plugins" pkg = rest.strip() or 'pmxbot' if pkg.lower() == 'python': return sys.version.split()[0] return importlib_metadata.version(pkg)
[ "def", "version", "(", "rest", ")", ":", "pkg", "=", "rest", ".", "strip", "(", ")", "or", "'pmxbot'", "if", "pkg", ".", "lower", "(", ")", "==", "'python'", ":", "return", "sys", ".", "version", ".", "split", "(", ")", "[", "0", "]", "return", ...
Get the version of pmxbot or one of its plugins
[ "Get", "the", "version", "of", "pmxbot", "or", "one", "of", "its", "plugins" ]
python
train
32.833333
letuananh/chirptext
chirptext/deko.py
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/deko.py#L113-L120
def pos3(self): ''' Use pos-sc1-sc2 as POS ''' parts = [self.pos] if self.sc1 and self.sc1 != '*': parts.append(self.sc1) if self.sc2 and self.sc2 != '*': parts.append(self.sc2) return '-'.join(parts)
[ "def", "pos3", "(", "self", ")", ":", "parts", "=", "[", "self", ".", "pos", "]", "if", "self", ".", "sc1", "and", "self", ".", "sc1", "!=", "'*'", ":", "parts", ".", "append", "(", "self", ".", "sc1", ")", "if", "self", ".", "sc2", "and", "s...
Use pos-sc1-sc2 as POS
[ "Use", "pos", "-", "sc1", "-", "sc2", "as", "POS" ]
python
train
33.125
open-homeautomation/pknx
knxip/gatewayscanner.py
https://github.com/open-homeautomation/pknx/blob/a8aed8271563923c447aa330ba7c1c2927286f7a/knxip/gatewayscanner.py#L69-L116
def start_search(self): """ Start the Gateway Search Request and return the address information :rtype: (string,int) :return: a tuple(string(IP),int(Port) when found or None when timeout occurs """ self._asyncio_loop = asyncio.get_event_loop() # Creating Broadcast Receiver coroutine_listen = self._asyncio_loop.create_datagram_endpoint( lambda: self.KNXSearchBroadcastReceiverProtocol( self._process_response, self._timeout_handling, self._timeout, self._asyncio_loop ), local_addr=(self._broadcast_ip_address, 0) ) self._listener_transport, listener_protocol = \ self._asyncio_loop.run_until_complete(coroutine_listen) # We are ready to fire the broadcast message coroutine_broadcaster = self._asyncio_loop.create_datagram_endpoint( lambda: self.KNXSearchBroadcastProtocol( self._asyncio_loop, self._listener_transport.get_extra_info('sockname') [1]), remote_addr=(self._broadcast_address, self._broadcast_port)) self._broadcaster_transport, broadcast_protocol = \ self._asyncio_loop.run_until_complete(coroutine_broadcaster) # Waiting for all Broadcast receive or timeout self._asyncio_loop.run_forever() # Got Response or Timeout if self._resolved_gateway_ip_address is None and \ self._resolved_gateway_ip_port is None: LOGGER.debug("Gateway not found!") return None else: LOGGER.debug("Gateway found at %s:%s", self._resolved_gateway_ip_address, self._resolved_gateway_ip_port) return self._resolved_gateway_ip_address, \ self._resolved_gateway_ip_port
[ "def", "start_search", "(", "self", ")", ":", "self", ".", "_asyncio_loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "# Creating Broadcast Receiver", "coroutine_listen", "=", "self", ".", "_asyncio_loop", ".", "create_datagram_endpoint", "(", "lambda", ":", ...
Start the Gateway Search Request and return the address information :rtype: (string,int) :return: a tuple(string(IP),int(Port) when found or None when timeout occurs
[ "Start", "the", "Gateway", "Search", "Request", "and", "return", "the", "address", "information" ]
python
train
39.229167
sci-bots/svg-model
svg_model/__init__.py
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/__init__.py#L260-L304
def scale_points(df_points, scale=INKSCAPE_PPmm.magnitude, inplace=False): ''' Translate points such that bounding box is anchored at (0, 0) and scale ``x`` and ``y`` columns of input frame by specified :data:`scale`. Parameters ---------- df_points : pandas.DataFrame Table of ``x``/``y`` point positions. Must have at least the following columns: - ``x``: x-coordinate - ``y``: y-coordinate scale : float, optional Factor to scale points by. By default, scale to millimeters based on Inkscape default of 90 pixels-per-inch. scale : float, optional Factor to scale points by. in_place : bool, optional If ``True``, input frame will be modified. Otherwise, the scaled points are written to a new frame, leaving the input frame unmodified. Returns ------- pandas.DataFrame Input frame with the points translated such that bounding box is anchored at (0, 0) and ``x`` and ``y`` values scaled by specified :data:`scale`. ''' if not inplace: df_points = df_points.copy() # Offset device, such that all coordinates are >= 0. df_points.x -= df_points.x.min() df_points.y -= df_points.y.min() # Scale path coordinates. df_points.x /= scale df_points.y /= scale return df_points
[ "def", "scale_points", "(", "df_points", ",", "scale", "=", "INKSCAPE_PPmm", ".", "magnitude", ",", "inplace", "=", "False", ")", ":", "if", "not", "inplace", ":", "df_points", "=", "df_points", ".", "copy", "(", ")", "# Offset device, such that all coordinates ...
Translate points such that bounding box is anchored at (0, 0) and scale ``x`` and ``y`` columns of input frame by specified :data:`scale`. Parameters ---------- df_points : pandas.DataFrame Table of ``x``/``y`` point positions. Must have at least the following columns: - ``x``: x-coordinate - ``y``: y-coordinate scale : float, optional Factor to scale points by. By default, scale to millimeters based on Inkscape default of 90 pixels-per-inch. scale : float, optional Factor to scale points by. in_place : bool, optional If ``True``, input frame will be modified. Otherwise, the scaled points are written to a new frame, leaving the input frame unmodified. Returns ------- pandas.DataFrame Input frame with the points translated such that bounding box is anchored at (0, 0) and ``x`` and ``y`` values scaled by specified :data:`scale`.
[ "Translate", "points", "such", "that", "bounding", "box", "is", "anchored", "at", "(", "0", "0", ")", "and", "scale", "x", "and", "y", "columns", "of", "input", "frame", "by", "specified", ":", "data", ":", "scale", "." ]
python
train
29.777778
mar10/wsgidav
wsgidav/util.py
https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/util.py#L1103-L1185
def evaluate_http_conditionals(dav_res, last_modified, entitytag, environ): """Handle 'If-...:' headers (but not 'If:' header). If-Match @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.24 Only perform the action if the client supplied entity matches the same entity on the server. This is mainly for methods like PUT to only update a resource if it has not been modified since the user last updated it. If-Match: "737060cd8c284d8af7ad3082f209582d" If-Modified-Since @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.25 Allows a 304 Not Modified to be returned if content is unchanged If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT If-None-Match @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.26 Allows a 304 Not Modified to be returned if content is unchanged, see HTTP ETag If-None-Match: "737060cd8c284d8af7ad3082f209582d" If-Unmodified-Since @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.28 Only send the response if the entity has not been modified since a specific time. """ if not dav_res: return # Conditions # An HTTP/1.1 origin server, upon receiving a conditional request that includes both a # Last-Modified date (e.g., in an If-Modified-Since or If-Unmodified-Since header field) and # one or more entity tags (e.g., in an If-Match, If-None-Match, or If-Range header field) as # cache validators, MUST NOT return a response status of 304 (Not Modified) unless doing so # is consistent with all of the conditional header fields in the request. 
if "HTTP_IF_MATCH" in environ and dav_res.support_etag(): ifmatchlist = environ["HTTP_IF_MATCH"].split(",") for ifmatchtag in ifmatchlist: ifmatchtag = ifmatchtag.strip(' "\t') if ifmatchtag == entitytag or ifmatchtag == "*": break raise DAVError(HTTP_PRECONDITION_FAILED, "If-Match header condition failed") # TODO: after the refactoring ifModifiedSinceFailed = False if "HTTP_IF_MODIFIED_SINCE" in environ and dav_res.support_modified(): ifmodtime = parse_time_string(environ["HTTP_IF_MODIFIED_SINCE"]) if ifmodtime and ifmodtime > last_modified: ifModifiedSinceFailed = True # If-None-Match # If none of the entity tags match, then the server MAY perform the requested method as if the # If-None-Match header field did not exist, but MUST also ignore any If-Modified-Since header # field (s) in the request. That is, if no entity tags match, then the server MUST NOT return # a 304 (Not Modified) response. ignoreIfModifiedSince = False if "HTTP_IF_NONE_MATCH" in environ and dav_res.support_etag(): ifmatchlist = environ["HTTP_IF_NONE_MATCH"].split(",") for ifmatchtag in ifmatchlist: ifmatchtag = ifmatchtag.strip(' "\t') if ifmatchtag == entitytag or ifmatchtag == "*": # ETag matched. If it's a GET request and we don't have an # conflicting If-Modified header, we return NOT_MODIFIED if ( environ["REQUEST_METHOD"] in ("GET", "HEAD") and not ifModifiedSinceFailed ): raise DAVError(HTTP_NOT_MODIFIED, "If-None-Match header failed") raise DAVError( HTTP_PRECONDITION_FAILED, "If-None-Match header condition failed" ) ignoreIfModifiedSince = True if "HTTP_IF_UNMODIFIED_SINCE" in environ and dav_res.support_modified(): ifunmodtime = parse_time_string(environ["HTTP_IF_UNMODIFIED_SINCE"]) if ifunmodtime and ifunmodtime <= last_modified: raise DAVError( HTTP_PRECONDITION_FAILED, "If-Unmodified-Since header condition failed" ) if ifModifiedSinceFailed and not ignoreIfModifiedSince: raise DAVError(HTTP_NOT_MODIFIED, "If-Modified-Since header condition failed") return
[ "def", "evaluate_http_conditionals", "(", "dav_res", ",", "last_modified", ",", "entitytag", ",", "environ", ")", ":", "if", "not", "dav_res", ":", "return", "# Conditions", "# An HTTP/1.1 origin server, upon receiving a conditional request that includes both a", "# Last-Modifi...
Handle 'If-...:' headers (but not 'If:' header). If-Match @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.24 Only perform the action if the client supplied entity matches the same entity on the server. This is mainly for methods like PUT to only update a resource if it has not been modified since the user last updated it. If-Match: "737060cd8c284d8af7ad3082f209582d" If-Modified-Since @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.25 Allows a 304 Not Modified to be returned if content is unchanged If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT If-None-Match @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.26 Allows a 304 Not Modified to be returned if content is unchanged, see HTTP ETag If-None-Match: "737060cd8c284d8af7ad3082f209582d" If-Unmodified-Since @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.28 Only send the response if the entity has not been modified since a specific time.
[ "Handle", "If", "-", "...", ":", "headers", "(", "but", "not", "If", ":", "header", ")", "." ]
python
valid
48.951807
SwissDataScienceCenter/renku-python
renku/api/datasets.py
https://github.com/SwissDataScienceCenter/renku-python/blob/691644d695b055a01e0ca22b2620e55bbd928c0d/renku/api/datasets.py#L443-L458
def check_for_git_repo(url): """Check if a url points to a git repository.""" u = parse.urlparse(url) is_git = False if os.path.splitext(u.path)[1] == '.git': is_git = True elif u.scheme in ('', 'file'): from git import InvalidGitRepositoryError, Repo try: Repo(u.path, search_parent_directories=True) is_git = True except InvalidGitRepositoryError: is_git = False return is_git
[ "def", "check_for_git_repo", "(", "url", ")", ":", "u", "=", "parse", ".", "urlparse", "(", "url", ")", "is_git", "=", "False", "if", "os", ".", "path", ".", "splitext", "(", "u", ".", "path", ")", "[", "1", "]", "==", "'.git'", ":", "is_git", "=...
Check if a url points to a git repository.
[ "Check", "if", "a", "url", "points", "to", "a", "git", "repository", "." ]
python
train
28.5625
lingthio/Flask-User
flask_user/db_adapters/dynamo_db_adapter.py
https://github.com/lingthio/Flask-User/blob/a379fa0a281789618c484b459cb41236779b95b1/flask_user/db_adapters/dynamo_db_adapter.py#L52-L71
def find_objects(self, ObjectClass, **kwargs): """ Retrieve all objects of type ``ObjectClass``, matching the filters specified in ``**kwargs`` -- case sensitive. """ print('dynamo.find_objects(%s, %s)' % (ObjectClass, str(kwargs))) query = self.db.engine.query(ObjectClass) for field_name, field_value in kwargs.items(): # Make sure that ObjectClass has a 'field_name' property field = getattr(ObjectClass, field_name, None) if field is None: raise KeyError("DynamoDBAdapter.find_objects(): Class '%s' has no field '%s'." % (ObjectClass, field_name)) # Add a case sensitive filter to the query query = query.filter(field == field_value) # Execute query return query.all(desc=True)
[ "def", "find_objects", "(", "self", ",", "ObjectClass", ",", "*", "*", "kwargs", ")", ":", "print", "(", "'dynamo.find_objects(%s, %s)'", "%", "(", "ObjectClass", ",", "str", "(", "kwargs", ")", ")", ")", "query", "=", "self", ".", "db", ".", "engine", ...
Retrieve all objects of type ``ObjectClass``, matching the filters specified in ``**kwargs`` -- case sensitive.
[ "Retrieve", "all", "objects", "of", "type", "ObjectClass", "matching", "the", "filters", "specified", "in", "**", "kwargs", "--", "case", "sensitive", "." ]
python
train
40.35
scivision/pymap3d
pymap3d/ecef.py
https://github.com/scivision/pymap3d/blob/c9cf676594611cdb52ff7e0eca6388c80ed4f63f/pymap3d/ecef.py#L351-L381
def uvw2enu(u: float, v: float, w: float, lat0: float, lon0: float, deg: bool = True) -> Tuple[float, float, float]: """ Parameters ---------- u : float or numpy.ndarray of float v : float or numpy.ndarray of float w : float or numpy.ndarray of float Results ------- East : float or numpy.ndarray of float target east ENU coordinate (meters) North : float or numpy.ndarray of float target north ENU coordinate (meters) Up : float or numpy.ndarray of float target up ENU coordinate (meters) """ if deg: lat0 = radians(lat0) lon0 = radians(lon0) t = cos(lon0) * u + sin(lon0) * v East = -sin(lon0) * u + cos(lon0) * v Up = cos(lat0) * t + sin(lat0) * w North = -sin(lat0) * t + cos(lat0) * w return East, North, Up
[ "def", "uvw2enu", "(", "u", ":", "float", ",", "v", ":", "float", ",", "w", ":", "float", ",", "lat0", ":", "float", ",", "lon0", ":", "float", ",", "deg", ":", "bool", "=", "True", ")", "->", "Tuple", "[", "float", ",", "float", ",", "float", ...
Parameters ---------- u : float or numpy.ndarray of float v : float or numpy.ndarray of float w : float or numpy.ndarray of float Results ------- East : float or numpy.ndarray of float target east ENU coordinate (meters) North : float or numpy.ndarray of float target north ENU coordinate (meters) Up : float or numpy.ndarray of float target up ENU coordinate (meters)
[ "Parameters", "----------" ]
python
train
26.193548
mrcagney/gtfstk
gtfstk/validators.py
https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/validators.py#L230-L282
def check_table( problems: List, table: str, df: DataFrame, condition, message: str, type_: str = "error", ) -> List: """ Check the given GTFS table for the given problem condition. Parameters ---------- problems : list A four-tuple containing 1. A problem type (string) equal to ``'error'`` or ``'warning'``; ``'error'`` means the GTFS is violated; ``'warning'`` means there is a problem but it is not a GTFS violation 2. A message (string) that describes the problem 3. A GTFS table name, e.g. ``'routes'``, in which the problem occurs 4. A list of rows (integers) of the table's DataFrame where the problem occurs table : string Name of a GTFS table df : DataFrame The GTFS table corresponding to ``table`` condition : boolean expression One involving ``df``, e.g.`df['route_id'].map(is_valid_str)`` message : string Problem message, e.g. ``'Invalid route_id'`` type_ : string ``'error'`` or ``'warning'`` indicating the type of problem encountered Returns ------- list The ``problems`` list extended as follows. Record the indices of ``df`` that statisfy the condition. If the list of indices is nonempty, append to the problems the item ``[type_, message, table, indices]``; otherwise do not append anything. """ indices = df.loc[condition].index.tolist() if indices: problems.append([type_, message, table, indices]) return problems
[ "def", "check_table", "(", "problems", ":", "List", ",", "table", ":", "str", ",", "df", ":", "DataFrame", ",", "condition", ",", "message", ":", "str", ",", "type_", ":", "str", "=", "\"error\"", ",", ")", "->", "List", ":", "indices", "=", "df", ...
Check the given GTFS table for the given problem condition. Parameters ---------- problems : list A four-tuple containing 1. A problem type (string) equal to ``'error'`` or ``'warning'``; ``'error'`` means the GTFS is violated; ``'warning'`` means there is a problem but it is not a GTFS violation 2. A message (string) that describes the problem 3. A GTFS table name, e.g. ``'routes'``, in which the problem occurs 4. A list of rows (integers) of the table's DataFrame where the problem occurs table : string Name of a GTFS table df : DataFrame The GTFS table corresponding to ``table`` condition : boolean expression One involving ``df``, e.g.`df['route_id'].map(is_valid_str)`` message : string Problem message, e.g. ``'Invalid route_id'`` type_ : string ``'error'`` or ``'warning'`` indicating the type of problem encountered Returns ------- list The ``problems`` list extended as follows. Record the indices of ``df`` that statisfy the condition. If the list of indices is nonempty, append to the problems the item ``[type_, message, table, indices]``; otherwise do not append anything.
[ "Check", "the", "given", "GTFS", "table", "for", "the", "given", "problem", "condition", "." ]
python
train
29.716981
mwouts/jupytext
jupytext/jupytext.py
https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/jupytext.py#L45-L90
def reads(self, s, **_): """Read a notebook represented as text""" if self.fmt.get('format_name') == 'pandoc': return md_to_notebook(s) lines = s.splitlines() cells = [] metadata, jupyter_md, header_cell, pos = header_to_metadata_and_cell(lines, self.implementation.header_prefix, self.implementation.extension) default_language = default_language_from_metadata_and_ext(metadata, self.implementation.extension) self.update_fmt_with_notebook_options(metadata) if header_cell: cells.append(header_cell) lines = lines[pos:] if self.implementation.format_name and self.implementation.format_name.startswith('sphinx'): cells.append(new_code_cell(source='%matplotlib inline')) cell_metadata = set() while lines: reader = self.implementation.cell_reader_class(self.fmt, default_language) cell, pos = reader.read(lines) cells.append(cell) cell_metadata.update(cell.metadata.keys()) if pos <= 0: raise Exception('Blocked at lines ' + '\n'.join(lines[:6])) # pragma: no cover lines = lines[pos:] update_metadata_filters(metadata, jupyter_md, cell_metadata) set_main_and_cell_language(metadata, cells, self.implementation.extension) if self.implementation.format_name and self.implementation.format_name.startswith('sphinx'): filtered_cells = [] for i, cell in enumerate(cells): if cell.source == '' and i > 0 and i + 1 < len(cells) \ and cells[i - 1].cell_type != 'markdown' and cells[i + 1].cell_type != 'markdown': continue filtered_cells.append(cell) cells = filtered_cells return new_notebook(cells=cells, metadata=metadata)
[ "def", "reads", "(", "self", ",", "s", ",", "*", "*", "_", ")", ":", "if", "self", ".", "fmt", ".", "get", "(", "'format_name'", ")", "==", "'pandoc'", ":", "return", "md_to_notebook", "(", "s", ")", "lines", "=", "s", ".", "splitlines", "(", ")"...
Read a notebook represented as text
[ "Read", "a", "notebook", "represented", "as", "text" ]
python
train
43.521739
crytic/slither
slither/slither.py
https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/slither.py#L138-L145
def register_printer(self, printer_class): """ :param printer_class: Class inheriting from `AbstractPrinter`. """ self._check_common_things('printer', printer_class, AbstractPrinter, self._printers) instance = printer_class(self, logger_printer) self._printers.append(instance)
[ "def", "register_printer", "(", "self", ",", "printer_class", ")", ":", "self", ".", "_check_common_things", "(", "'printer'", ",", "printer_class", ",", "AbstractPrinter", ",", "self", ".", "_printers", ")", "instance", "=", "printer_class", "(", "self", ",", ...
:param printer_class: Class inheriting from `AbstractPrinter`.
[ ":", "param", "printer_class", ":", "Class", "inheriting", "from", "AbstractPrinter", "." ]
python
train
39.875
mosesschwartz/scrypture
scrypture/scrypture.py
https://github.com/mosesschwartz/scrypture/blob/d51eb0c9835a5122a655078268185ce8ab9ec86a/scrypture/scrypture.py#L350-L377
def load_scripts(): '''Import all of the modules named in REGISTERED_SCRIPTS''' # Add scrypture package package to the path before importing # so everything can import everything else regardless of package scrypture_dir = os.path.realpath( os.path.abspath( os.path.split( inspect.getfile( inspect.currentframe() ))[0])) if scrypture_dir not in sys.path: sys.path.insert(0, scrypture_dir) # Load list of registered scripts registered_scripts = app.config['REGISTERED_SCRIPTS'] for script in registered_scripts: try: s = import_module('.'+script, package=os.path.split(app.config['SCRIPTS_DIR'])[-1]) s.package = s.__name__.split('.')[1] #remove package from script name: script_name = script.split('.')[-1] registered_modules[script_name] = s except Exception as e: logging.warning('Could not import ' + \ str(script)+': '+str(e.message)) logging.debug(str(traceback.format_exc())) continue
[ "def", "load_scripts", "(", ")", ":", "# Add scrypture package package to the path before importing", "# so everything can import everything else regardless of package", "scrypture_dir", "=", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "abspath", "(", "...
Import all of the modules named in REGISTERED_SCRIPTS
[ "Import", "all", "of", "the", "modules", "named", "in", "REGISTERED_SCRIPTS" ]
python
train
40.785714
bububa/pyTOP
pyTOP/simba.py
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/simba.py#L140-L149
def update(self, campaign_id, schedule, nick=None): '''xxxxx.xxxxx.campaign.schedule.update =================================== 更新一个推广计划的分时折扣设置''' request = TOPRequest('xxxxx.xxxxx.campaign.schedule.update') request['campaign_id'] = campaign_id request['schedule'] = schedule if nick!=None: request['nick'] = nick self.create(self.execute(request), fields=['success','result','success','result_code','result_message'], models={'result':CampaignSchedule}) return self.result
[ "def", "update", "(", "self", ",", "campaign_id", ",", "schedule", ",", "nick", "=", "None", ")", ":", "request", "=", "TOPRequest", "(", "'xxxxx.xxxxx.campaign.schedule.update'", ")", "request", "[", "'campaign_id'", "]", "=", "campaign_id", "request", "[", "...
xxxxx.xxxxx.campaign.schedule.update =================================== 更新一个推广计划的分时折扣设置
[ "xxxxx", ".", "xxxxx", ".", "campaign", ".", "schedule", ".", "update", "===================================", "更新一个推广计划的分时折扣设置" ]
python
train
53.6
brainiak/brainiak
brainiak/utils/utils.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/utils.py#L72-L92
def from_sym_2_tri(symm): """convert a 2D symmetric matrix to an upper triangular matrix in 1D format Parameters ---------- symm : 2D array Symmetric matrix Returns ------- tri: 1D array Contains elements of upper triangular matrix """ inds = np.triu_indices_from(symm) tri = symm[inds] return tri
[ "def", "from_sym_2_tri", "(", "symm", ")", ":", "inds", "=", "np", ".", "triu_indices_from", "(", "symm", ")", "tri", "=", "symm", "[", "inds", "]", "return", "tri" ]
convert a 2D symmetric matrix to an upper triangular matrix in 1D format Parameters ---------- symm : 2D array Symmetric matrix Returns ------- tri: 1D array Contains elements of upper triangular matrix
[ "convert", "a", "2D", "symmetric", "matrix", "to", "an", "upper", "triangular", "matrix", "in", "1D", "format" ]
python
train
16.857143
horejsek/python-webdriverwrapper
webdriverwrapper/info.py
https://github.com/horejsek/python-webdriverwrapper/blob/a492f79ab60ed83d860dd817b6a0961500d7e3f5/webdriverwrapper/info.py#L27-L39
def allowed_info_messages(*info_messages): """ Decorator ignoring defined info messages at the end of test method. As param use what :py:meth:`~.WebdriverWrapperInfoMixin.get_info_messages` returns. .. versionadded:: 2.0 """ def wrapper(func): setattr(func, ALLOWED_INFO_MESSAGES, info_messages) return func return wrapper
[ "def", "allowed_info_messages", "(", "*", "info_messages", ")", ":", "def", "wrapper", "(", "func", ")", ":", "setattr", "(", "func", ",", "ALLOWED_INFO_MESSAGES", ",", "info_messages", ")", "return", "func", "return", "wrapper" ]
Decorator ignoring defined info messages at the end of test method. As param use what :py:meth:`~.WebdriverWrapperInfoMixin.get_info_messages` returns. .. versionadded:: 2.0
[ "Decorator", "ignoring", "defined", "info", "messages", "at", "the", "end", "of", "test", "method", ".", "As", "param", "use", "what", ":", "py", ":", "meth", ":", "~", ".", "WebdriverWrapperInfoMixin", ".", "get_info_messages", "returns", "." ]
python
train
27.923077
StackStorm/pybind
pybind/nos/v7_2_0/rbridge_id/vrf/address_family/ipv6/unicast/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v7_2_0/rbridge_id/vrf/address_family/ipv6/unicast/__init__.py#L136-L157
def _set_route_target(self, v, load=False): """ Setter method for route_target, mapped from YANG variable /rbridge_id/vrf/address_family/ipv6/unicast/route_target (list) If this variable is read-only (config: false) in the source YANG file, then _set_route_target is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_route_target() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("action target_community",route_target.route_target, yang_name="route-target", rest_name="route-target", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='action target-community', extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'VrfRtAfIpv6Ucast'}}), is_container='list', yang_name="route-target", rest_name="route-target", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'VrfRtAfIpv6Ucast'}}, namespace='urn:brocade.com:mgmt:brocade-vrf', defining_module='brocade-vrf', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """route_target must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("action target_community",route_target.route_target, yang_name="route-target", rest_name="route-target", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='action target-community', extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-suppress-mode': None, u'cli-incomplete-no': None, 
u'cli-incomplete-command': None, u'callpoint': u'VrfRtAfIpv6Ucast'}}), is_container='list', yang_name="route-target", rest_name="route-target", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'VrfRtAfIpv6Ucast'}}, namespace='urn:brocade.com:mgmt:brocade-vrf', defining_module='brocade-vrf', yang_type='list', is_config=True)""", }) self.__route_target = t if hasattr(self, '_set'): self._set()
[ "def", "_set_route_target", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "b...
Setter method for route_target, mapped from YANG variable /rbridge_id/vrf/address_family/ipv6/unicast/route_target (list) If this variable is read-only (config: false) in the source YANG file, then _set_route_target is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_route_target() directly.
[ "Setter", "method", "for", "route_target", "mapped", "from", "YANG", "variable", "/", "rbridge_id", "/", "vrf", "/", "address_family", "/", "ipv6", "/", "unicast", "/", "route_target", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", ...
python
train
123.090909
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_devop.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_devop.py#L16-L35
def cmd_devop(self, args): '''device operations''' usage = "Usage: devop <read|write> <spi|i2c> name bus address" if len(args) < 5: print(usage) return if args[1] == 'spi': bustype = mavutil.mavlink.DEVICE_OP_BUSTYPE_SPI elif args[1] == 'i2c': bustype = mavutil.mavlink.DEVICE_OP_BUSTYPE_I2C else: print(usage) if args[0] == 'read': self.devop_read(args[2:], bustype) elif args[0] == 'write': self.devop_write(args[2:], bustype) else: print(usage)
[ "def", "cmd_devop", "(", "self", ",", "args", ")", ":", "usage", "=", "\"Usage: devop <read|write> <spi|i2c> name bus address\"", "if", "len", "(", "args", ")", "<", "5", ":", "print", "(", "usage", ")", "return", "if", "args", "[", "1", "]", "==", "'spi'"...
device operations
[ "device", "operations" ]
python
train
29.9
Chilipp/psyplot
psyplot/data.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/data.py#L3312-L3356
def rename(self, arr, new_name=True): """ Rename an array to find a name that isn't already in the list Parameters ---------- arr: InteractiveBase A :class:`InteractiveArray` or :class:`InteractiveList` instance whose name shall be checked new_name: bool or str If False, and the ``arr_name`` attribute of the new array is already in the list, a ValueError is raised. If True and the ``arr_name`` attribute of the new array is not already in the list, the name is not changed. Otherwise, if the array name is already in use, `new_name` is set to 'arr{0}'. If not True, this will be used for renaming (if the array name of `arr` is in use or not). ``'{0}'`` is replaced by a counter Returns ------- InteractiveBase `arr` with changed ``arr_name`` attribute bool or None True, if the array has been renamed, False if not and None if the array is already in the list Raises ------ ValueError If it was impossible to find a name that isn't already in the list ValueError If `new_name` is False and the array is already in the list""" name_in_me = arr.psy.arr_name in self.arr_names if not name_in_me: return arr, False elif name_in_me and not self._contains_array(arr): if new_name is False: raise ValueError( "Array name %s is already in use! Set the `new_name` " "parameter to None for renaming!" % arr.psy.arr_name) elif new_name is True: new_name = new_name if isstring(new_name) else 'arr{0}' arr.psy.arr_name = self.next_available_name(new_name) return arr, True return arr, None
[ "def", "rename", "(", "self", ",", "arr", ",", "new_name", "=", "True", ")", ":", "name_in_me", "=", "arr", ".", "psy", ".", "arr_name", "in", "self", ".", "arr_names", "if", "not", "name_in_me", ":", "return", "arr", ",", "False", "elif", "name_in_me"...
Rename an array to find a name that isn't already in the list Parameters ---------- arr: InteractiveBase A :class:`InteractiveArray` or :class:`InteractiveList` instance whose name shall be checked new_name: bool or str If False, and the ``arr_name`` attribute of the new array is already in the list, a ValueError is raised. If True and the ``arr_name`` attribute of the new array is not already in the list, the name is not changed. Otherwise, if the array name is already in use, `new_name` is set to 'arr{0}'. If not True, this will be used for renaming (if the array name of `arr` is in use or not). ``'{0}'`` is replaced by a counter Returns ------- InteractiveBase `arr` with changed ``arr_name`` attribute bool or None True, if the array has been renamed, False if not and None if the array is already in the list Raises ------ ValueError If it was impossible to find a name that isn't already in the list ValueError If `new_name` is False and the array is already in the list
[ "Rename", "an", "array", "to", "find", "a", "name", "that", "isn", "t", "already", "in", "the", "list" ]
python
train
42.066667
thecynic/pylutron
pylutron/__init__.py
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L640-L644
def handle_update(self, action, params): """Handle the specified action on this component.""" _LOGGER.debug('Keypad: "%s" Handling "%s" Action: %s Params: %s"' % ( self._keypad.name, self.name, action, params)) return False
[ "def", "handle_update", "(", "self", ",", "action", ",", "params", ")", ":", "_LOGGER", ".", "debug", "(", "'Keypad: \"%s\" Handling \"%s\" Action: %s Params: %s\"'", "%", "(", "self", ".", "_keypad", ".", "name", ",", "self", ".", "name", ",", "action", ",", ...
Handle the specified action on this component.
[ "Handle", "the", "specified", "action", "on", "this", "component", "." ]
python
train
49.8
Iotic-Labs/py-IoticAgent
src/IoticAgent/IOT/ResourceMeta.py
https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/IOT/ResourceMeta.py#L148-L165
def set_label(self, label, lang=None): """Sets the `label` metadata property on your Thing/Point. Only one label is allowed per language, so any other labels in this language are removed before adding this one Raises `ValueError` containing an error message if the parameters fail validation `label` (mandatory) (string) the new text of the label `lang` (optional) (string) The two-character ISO 639-1 language code to use for your label. None means use the default language for your agent. See [Config](./Config.m.html#IoticAgent.IOT.Config.Config.__init__) """ label = Validation.label_check_convert(label) lang = Validation.lang_check_convert(lang, default=self._default_lang) # remove any other labels with this language before adding self.delete_label(lang) subj = self._get_uuid_uriref() self._graph.add((subj, self._labelPredicate, Literal(label, lang)))
[ "def", "set_label", "(", "self", ",", "label", ",", "lang", "=", "None", ")", ":", "label", "=", "Validation", ".", "label_check_convert", "(", "label", ")", "lang", "=", "Validation", ".", "lang_check_convert", "(", "lang", ",", "default", "=", "self", ...
Sets the `label` metadata property on your Thing/Point. Only one label is allowed per language, so any other labels in this language are removed before adding this one Raises `ValueError` containing an error message if the parameters fail validation `label` (mandatory) (string) the new text of the label `lang` (optional) (string) The two-character ISO 639-1 language code to use for your label. None means use the default language for your agent. See [Config](./Config.m.html#IoticAgent.IOT.Config.Config.__init__)
[ "Sets", "the", "label", "metadata", "property", "on", "your", "Thing", "/", "Point", ".", "Only", "one", "label", "is", "allowed", "per", "language", "so", "any", "other", "labels", "in", "this", "language", "are", "removed", "before", "adding", "this", "o...
python
train
53.333333
cloud-custodian/cloud-custodian
tools/ops/mugc.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/ops/mugc.py#L113-L129
def resources_gc_prefix(options, policy_config, policy_collection): """Garbage collect old custodian policies based on prefix. We attempt to introspect to find the event sources for a policy but without the old configuration this is implicit. """ # Classify policies by region policy_regions = {} for p in policy_collection: if p.execution_mode == 'poll': continue policy_regions.setdefault(p.options.region, []).append(p) regions = get_gc_regions(options.regions) for r in regions: region_gc(options, r, policy_config, policy_regions.get(r, []))
[ "def", "resources_gc_prefix", "(", "options", ",", "policy_config", ",", "policy_collection", ")", ":", "# Classify policies by region", "policy_regions", "=", "{", "}", "for", "p", "in", "policy_collection", ":", "if", "p", ".", "execution_mode", "==", "'poll'", ...
Garbage collect old custodian policies based on prefix. We attempt to introspect to find the event sources for a policy but without the old configuration this is implicit.
[ "Garbage", "collect", "old", "custodian", "policies", "based", "on", "prefix", "." ]
python
train
35.588235
saltstack/salt
salt/state.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/state.py#L3386-L3447
def _merge_tops_same(self, tops): ''' For each saltenv, only consider the top file from that saltenv. All sections matching a given saltenv, which appear in a different saltenv's top file, will be ignored. ''' top = DefaultOrderedDict(OrderedDict) for cenv, ctops in six.iteritems(tops): if all([x == {} for x in ctops]): # No top file found in this env, check the default_top default_top = self.opts['default_top'] fallback_tops = tops.get(default_top, []) if all([x == {} for x in fallback_tops]): # Nothing in the fallback top file log.error( 'The \'%s\' saltenv has no top file, and the fallback ' 'saltenv specified by default_top (%s) also has no ' 'top file', cenv, default_top ) continue for ctop in fallback_tops: for saltenv, targets in six.iteritems(ctop): if saltenv != cenv: continue log.debug( 'The \'%s\' saltenv has no top file, using the ' 'default_top saltenv (%s)', cenv, default_top ) for tgt in targets: top[saltenv][tgt] = ctop[saltenv][tgt] break else: log.error( 'The \'%s\' saltenv has no top file, and no ' 'matches were found in the top file for the ' 'default_top saltenv (%s)', cenv, default_top ) continue else: for ctop in ctops: for saltenv, targets in six.iteritems(ctop): if saltenv == 'include': continue elif saltenv != cenv: log.debug( 'Section for saltenv \'%s\' in the \'%s\' ' 'saltenv\'s top file will be ignored, as the ' 'top_file_merging_strategy is set to \'same\' ' 'and the saltenvs do not match', saltenv, cenv ) continue try: for tgt in targets: top[saltenv][tgt] = ctop[saltenv][tgt] except TypeError: raise SaltRenderError('Unable to render top file. No targets found.') return top
[ "def", "_merge_tops_same", "(", "self", ",", "tops", ")", ":", "top", "=", "DefaultOrderedDict", "(", "OrderedDict", ")", "for", "cenv", ",", "ctops", "in", "six", ".", "iteritems", "(", "tops", ")", ":", "if", "all", "(", "[", "x", "==", "{", "}", ...
For each saltenv, only consider the top file from that saltenv. All sections matching a given saltenv, which appear in a different saltenv's top file, will be ignored.
[ "For", "each", "saltenv", "only", "consider", "the", "top", "file", "from", "that", "saltenv", ".", "All", "sections", "matching", "a", "given", "saltenv", "which", "appear", "in", "a", "different", "saltenv", "s", "top", "file", "will", "be", "ignored", "...
python
train
45.274194
mozilla-releng/signtool
signtool/util/archives.py
https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/util/archives.py#L171-L183
def packtar(tarfile, files, srcdir): """ Pack the given files into a tar, setting cwd = srcdir""" nullfd = open(os.devnull, "w") tarfile = cygpath(os.path.abspath(tarfile)) log.debug("pack tar %s from folder %s with files ", tarfile, srcdir) log.debug(files) try: check_call([TAR, '-czf', tarfile] + files, cwd=srcdir, stdout=nullfd, preexec_fn=_noumask) except Exception: log.exception("Error packing tar file %s to %s", tarfile, srcdir) raise nullfd.close()
[ "def", "packtar", "(", "tarfile", ",", "files", ",", "srcdir", ")", ":", "nullfd", "=", "open", "(", "os", ".", "devnull", ",", "\"w\"", ")", "tarfile", "=", "cygpath", "(", "os", ".", "path", ".", "abspath", "(", "tarfile", ")", ")", "log", ".", ...
Pack the given files into a tar, setting cwd = srcdir
[ "Pack", "the", "given", "files", "into", "a", "tar", "setting", "cwd", "=", "srcdir" ]
python
train
40.230769
bcbio/bcbio-nextgen
bcbio/rnaseq/qc.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/qc.py#L42-L64
def estimate_library_complexity(df, algorithm="RNA-seq"): """ estimate library complexity from the number of reads vs. number of unique start sites. returns "NA" if there are not enough data points to fit the line """ DEFAULT_CUTOFFS = {"RNA-seq": (0.25, 0.40)} cutoffs = DEFAULT_CUTOFFS[algorithm] if len(df) < 5: return {"unique_starts_per_read": 'nan', "complexity": "NA"} model = sm.ols(formula="starts ~ reads", data=df) fitted = model.fit() slope = fitted.params["reads"] if slope <= cutoffs[0]: complexity = "LOW" elif slope <= cutoffs[1]: complexity = "MEDIUM" else: complexity = "HIGH" # for now don't return the complexity flag return {"Unique Starts Per Read": float(slope)}
[ "def", "estimate_library_complexity", "(", "df", ",", "algorithm", "=", "\"RNA-seq\"", ")", ":", "DEFAULT_CUTOFFS", "=", "{", "\"RNA-seq\"", ":", "(", "0.25", ",", "0.40", ")", "}", "cutoffs", "=", "DEFAULT_CUTOFFS", "[", "algorithm", "]", "if", "len", "(", ...
estimate library complexity from the number of reads vs. number of unique start sites. returns "NA" if there are not enough data points to fit the line
[ "estimate", "library", "complexity", "from", "the", "number", "of", "reads", "vs", ".", "number", "of", "unique", "start", "sites", ".", "returns", "NA", "if", "there", "are", "not", "enough", "data", "points", "to", "fit", "the", "line" ]
python
train
33.73913
westurner/pyrpo
pyrpo/pyrpo.py
https://github.com/westurner/pyrpo/blob/2a910af055dc405b761571a52ef87842397ddadf/pyrpo/pyrpo.py#L1439-L1518
def _parselog(self, r): """ Parse bazaar log file format Args: r (str): bzr revision identifier Yields: dict: dict of (attr, value) pairs :: $ bzr log -l1 ------------------------------------------------------------ revno: 1 committer: ubuntu <ubuntu@ubuntu-desktop> branch nick: ubuntu-desktop /etc repository timestamp: Wed 2011-10-12 01:16:55 -0500 message: Initial commit """ def __parselog(entry): """ Parse bazaar log file format Args: entry (str): log message string Yields: tuple: (attrname, value) """ bufname = None buf = deque() print(entry) if entry == ['']: return for l in itersplit(entry, '\n'): if not l: continue mobj = self.logrgx.match(l) if not mobj: # " - Log message" buf.append(self._logmessage_transform(l)) if mobj: mobjlen = len(mobj.groups()) if mobjlen == 2: # "attr: value" attr, value = mobj.groups() if attr == 'message': bufname = 'desc' else: attr = self.field_trans.get(attr, attr) yield (self.field_trans.get(attr, attr), value) else: raise Exception() if bufname is not None: if len(buf): buf.pop() len(buf) > 1 and buf.popleft() yield (bufname, '\n'.join(buf)) return kwargs = dict(__parselog(r)) # FIXME if kwargs: if 'tags' not in kwargs: kwargs['tags'] = tuple() else: kwargs['tags'].split(' ') # TODO if 'branchnick' not in kwargs: kwargs['branchnick'] = None try: yield kwargs # TODO # return self._tuple(**kwargs) except: log.error(r) log.error(kwargs) raise else: log.error("failed to parse: %r" % r)
[ "def", "_parselog", "(", "self", ",", "r", ")", ":", "def", "__parselog", "(", "entry", ")", ":", "\"\"\"\n Parse bazaar log file format\n\n Args:\n entry (str): log message string\n\n Yields:\n tuple: (attrname, value)\n ...
Parse bazaar log file format Args: r (str): bzr revision identifier Yields: dict: dict of (attr, value) pairs :: $ bzr log -l1 ------------------------------------------------------------ revno: 1 committer: ubuntu <ubuntu@ubuntu-desktop> branch nick: ubuntu-desktop /etc repository timestamp: Wed 2011-10-12 01:16:55 -0500 message: Initial commit
[ "Parse", "bazaar", "log", "file", "format" ]
python
train
30.3875
pyscaffold/configupdater
src/configupdater/configupdater.py
https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L121-L124
def add_after(self): """Returns a builder inserting a new block after the current block""" idx = self._container.structure.index(self) return BlockBuilder(self._container, idx+1)
[ "def", "add_after", "(", "self", ")", ":", "idx", "=", "self", ".", "_container", ".", "structure", ".", "index", "(", "self", ")", "return", "BlockBuilder", "(", "self", ".", "_container", ",", "idx", "+", "1", ")" ]
Returns a builder inserting a new block after the current block
[ "Returns", "a", "builder", "inserting", "a", "new", "block", "after", "the", "current", "block" ]
python
train
49.75
RJT1990/pyflux
pyflux/gas/gasreg.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/gas/gasreg.py#L443-L528
def plot_predict(self, h=5, past_values=20, intervals=True, oos_data=None, **kwargs): """ Makes forecast with the estimated model Parameters ---------- h : int (default : 5) How many steps ahead would you like to forecast? past_values : int (default : 20) How many past observations to show on the forecast graph? intervals : Boolean Would you like to show prediction intervals for the forecast? oos_data : pd.DataFrame Data for the variables to be used out of sample (ys can be NaNs) Returns ---------- - Plot of the forecast """ import matplotlib.pyplot as plt import seaborn as sns figsize = kwargs.get('figsize',(10,7)) if self.latent_variables.estimated is False: raise Exception("No latent variables estimated!") else: # Sort/manipulate the out-of-sample data _, X_oos = dmatrices(self.formula, oos_data) X_oos = np.array([X_oos])[0] X_pred = X_oos[:h] date_index = self.shift_dates(h) if self.latent_variables.estimation_method in ['M-H']: sim_vector = np.zeros([15000,h]) for n in range(0, 15000): t_z = self.draw_latent_variables(nsims=1).T[0] _, Y, _, coefficients = self._model(t_z) coefficients_star = coefficients.T[-1] theta_pred = np.dot(np.array([coefficients_star]), X_pred.T)[0] t_z = np.array([self.latent_variables.z_list[k].prior.transform(t_z[k]) for k in range(t_z.shape[0])]) model_scale, model_shape, model_skewness = self._get_scale_and_shape(t_z) sim_vector[n,:] = self.family.draw_variable(self.link(theta_pred), model_scale, model_shape, model_skewness, theta_pred.shape[0]) mean_values = np.append(Y, self.link(np.array([np.mean(i) for i in sim_vector.T]))) else: # Retrieve data, dates and (transformed) latent variables _, Y, _, coefficients = self._model(self.latent_variables.get_z_values()) coefficients_star = coefficients.T[-1] theta_pred = np.dot(np.array([coefficients_star]), X_pred.T)[0] t_z = self.transform_z() sim_vector = np.zeros([15000,h]) mean_values = np.append(Y, self.link(theta_pred)) model_scale, model_shape, model_skewness = 
self._get_scale_and_shape(t_z) if self.model_name2 == "Skewt": m1 = (np.sqrt(model_shape)*sp.gamma((model_shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(model_shape/2.0)) mean_values += (model_skewness - (1.0/model_skewness))*model_scale*m1 for n in range(0,15000): sim_vector[n,:] = self.family.draw_variable(self.link(theta_pred),model_scale,model_shape,model_skewness,theta_pred.shape[0]) sim_vector = sim_vector.T error_bars = [] for pre in range(5,100,5): error_bars.append(np.insert([np.percentile(i,pre) for i in sim_vector], 0, mean_values[-h-1])) forecasted_values = mean_values[-h-1:] plot_values = mean_values[-h-past_values:] plot_index = date_index[-h-past_values:] plt.figure(figsize=figsize) if intervals == True: alpha =[0.15*i/float(100) for i in range(50,12,-2)] for count in range(9): plt.fill_between(date_index[-h-1:], error_bars[count], error_bars[-count], alpha=alpha[count]) plt.plot(plot_index,plot_values) plt.title("Forecast for " + self.data_name) plt.xlabel("Time") plt.ylabel(self.data_name) plt.show()
[ "def", "plot_predict", "(", "self", ",", "h", "=", "5", ",", "past_values", "=", "20", ",", "intervals", "=", "True", ",", "oos_data", "=", "None", ",", "*", "*", "kwargs", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "seab...
Makes forecast with the estimated model Parameters ---------- h : int (default : 5) How many steps ahead would you like to forecast? past_values : int (default : 20) How many past observations to show on the forecast graph? intervals : Boolean Would you like to show prediction intervals for the forecast? oos_data : pd.DataFrame Data for the variables to be used out of sample (ys can be NaNs) Returns ---------- - Plot of the forecast
[ "Makes", "forecast", "with", "the", "estimated", "model" ]
python
train
45.255814
KelSolaar/Umbra
umbra/ui/models.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/models.py#L328-L363
def setData(self, index, value, role=Qt.EditRole): """ Reimplements the :meth:`QAbstractItemModel.setData` method. :param index: Index. :type index: QModelIndex :param value: Value. :type value: QVariant :param role: Role. :type role: int :return: Method success. :rtype: bool """ if not index.isValid(): return False node = self.get_node(index) if role == Qt.DisplayRole or role == Qt.EditRole: value = foundations.strings.to_string(value.toString()) roles = {Qt.DisplayRole: value, Qt.EditRole: value} else: roles = {role: value} if index.column() == 0: if (node and hasattr(node, "roles")): node.roles.update(roles) node.name = value else: attribute = self.get_attribute(node, index.column()) if (attribute and hasattr(attribute, "roles")): attribute.roles.update(roles) attribute.value = value self.dataChanged.emit(index, index) return True
[ "def", "setData", "(", "self", ",", "index", ",", "value", ",", "role", "=", "Qt", ".", "EditRole", ")", ":", "if", "not", "index", ".", "isValid", "(", ")", ":", "return", "False", "node", "=", "self", ".", "get_node", "(", "index", ")", "if", "...
Reimplements the :meth:`QAbstractItemModel.setData` method. :param index: Index. :type index: QModelIndex :param value: Value. :type value: QVariant :param role: Role. :type role: int :return: Method success. :rtype: bool
[ "Reimplements", "the", ":", "meth", ":", "QAbstractItemModel", ".", "setData", "method", "." ]
python
train
30.972222
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L1702-L1793
def correct_db_restart(self): """Ensure DB is consistent after unexpected restarts. """ LOG.info("Checking consistency of DB") # Any Segments allocated that's not in Network or FW DB, release it seg_netid_dict = self.service_segs.get_seg_netid_src(fw_const.FW_CONST) vlan_netid_dict = self.service_vlans.get_seg_netid_src( fw_const.FW_CONST) for netid in seg_netid_dict: net = self.get_network(netid) fw_net = self.get_fw_by_netid(netid) if not net or not fw_net: if netid in vlan_netid_dict: vlan_net = vlan_netid_dict[netid] else: vlan_net = None self.delete_os_nwk_db(netid, seg_netid_dict[netid], vlan_net) LOG.info("Allocated segment for net %s not in DB " "returning", net) return # Any VLANs allocated that's not in Network or FW DB, release it # For Virtual case, this list will be empty for netid in vlan_netid_dict: net = self.get_network(netid) fw_net = self.get_fw_by_netid(netid) if not net or not fw_net: if netid in seg_netid_dict: vlan_net = seg_netid_dict[netid] else: vlan_net = None self.delete_os_nwk_db(netid, vlan_net, vlan_netid_dict[netid]) LOG.info("Allocated vlan for net %s not in DB returning", net) return # Release all IP's from DB that has no NetID or SubnetID self.service_in_ip.release_subnet_no_netid() self.service_out_ip.release_subnet_no_netid() # It leaves out following possibilities not covered by above. # 1. Crash can happen just after creating FWID in DB (for init state) # 2. Crash can happen after 1 + IP address allocation # 3. Crash can happen after 2 + create OS network # IP address allocated will be freed as above. # Only OS network will remain for case 3. # Also, create that FW DB entry only if that FWID didn't exist. 
# Delete all dummy networks created for dummy router from OS if it's # ID is not in NetDB # Delete all dummy routers and its associated networks/subnetfrom OS # if it's ID is not in FWDB fw_dict = self.get_all_fw_db() for fw_id in fw_dict: rtr_nwk = fw_id[0:4] + fw_const.DUMMY_SERVICE_NWK + ( fw_id[len(fw_id) - 4:]) net_list = self.os_helper.get_network_by_name(rtr_nwk) # TODO(padkrish) Come back to finish this. Not sure of this. # The router interface should be deleted first and then the network # Try using show_router for net in net_list: # Check for if it's there in NetDB net_db_item = self.get_network(net.get('id')) if not net_db_item: self.os_helper.delete_network_all_subnets(net.get('id')) LOG.info("Router Network %s not in DB, returning", net.get('id')) return rtr_name = fw_id[0:4] + fw_const.DUMMY_SERVICE_RTR + ( fw_id[len(fw_id) - 4:]) rtr_list = self.os_helper.get_rtr_by_name(rtr_name) for rtr in rtr_list: fw_db_item = self.get_fw_by_rtrid(rtr.get('id')) if not fw_db_item: # There should be only one if not net_list: LOG.error("net_list len is 0, router net not " "found") return fw_type = fw_dict[fw_id].get('fw_type') if fw_type == fw_const.FW_TENANT_EDGE: rtr_net = net_list[0] rtr_subnet_lt = ( self.os_helper.get_subnets_for_net(rtr_net)) if rtr_subnet_lt is None: LOG.error("router subnet not found for " "net %s", rtr_net) return rtr_subnet_id = rtr_subnet_lt[0].get('id') LOG.info("Deleted dummy router network %s", rtr.get('id')) ret = self.delete_os_dummy_rtr_nwk(rtr.get('id'), rtr_net.get('id'), rtr_subnet_id) return ret LOG.info("Done Checking consistency of DB, no issues")
[ "def", "correct_db_restart", "(", "self", ")", ":", "LOG", ".", "info", "(", "\"Checking consistency of DB\"", ")", "# Any Segments allocated that's not in Network or FW DB, release it", "seg_netid_dict", "=", "self", ".", "service_segs", ".", "get_seg_netid_src", "(", "fw_...
Ensure DB is consistent after unexpected restarts.
[ "Ensure", "DB", "is", "consistent", "after", "unexpected", "restarts", "." ]
python
train
50.967391
xeroc/python-graphenelib
graphenestorage/masterpassword.py
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenestorage/masterpassword.py#L141-L153
def _get_encrypted_masterpassword(self): """ Obtain the encrypted masterkey .. note:: The encrypted masterkey is checksummed, so that we can figure out that a provided password is correct or not. The checksum is only 4 bytes long! """ if not self.unlocked(): raise WalletLocked aes = AESCipher(self.password) return "{}${}".format( self._derive_checksum(self.masterkey), aes.encrypt(self.masterkey) )
[ "def", "_get_encrypted_masterpassword", "(", "self", ")", ":", "if", "not", "self", ".", "unlocked", "(", ")", ":", "raise", "WalletLocked", "aes", "=", "AESCipher", "(", "self", ".", "password", ")", "return", "\"{}${}\"", ".", "format", "(", "self", ".",...
Obtain the encrypted masterkey .. note:: The encrypted masterkey is checksummed, so that we can figure out that a provided password is correct or not. The checksum is only 4 bytes long!
[ "Obtain", "the", "encrypted", "masterkey" ]
python
valid
38.846154
spyder-ide/spyder
spyder/plugins/editor/widgets/editor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/editor.py#L2122-L2143
def refresh(self, index=None): """Refresh tabwidget""" if index is None: index = self.get_stack_index() # Set current editor if self.get_stack_count(): index = self.get_stack_index() finfo = self.data[index] editor = finfo.editor editor.setFocus() self._refresh_outlineexplorer(index, update=False) self.__refresh_statusbar(index) self.__refresh_readonly(index) self.__check_file_status(index) self.__modify_stack_title() self.update_plugin_title.emit() else: editor = None # Update the modification-state-dependent parameters self.modification_changed() # Update FindReplace binding self.find_widget.set_editor(editor, refresh=False)
[ "def", "refresh", "(", "self", ",", "index", "=", "None", ")", ":", "if", "index", "is", "None", ":", "index", "=", "self", ".", "get_stack_index", "(", ")", "# Set current editor\r", "if", "self", ".", "get_stack_count", "(", ")", ":", "index", "=", "...
Refresh tabwidget
[ "Refresh", "tabwidget" ]
python
train
38.636364
mbj4668/pyang
pyang/plugins/sample-xml-skeleton.py
https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/plugins/sample-xml-skeleton.py#L135-L140
def process_children(self, node, elem, module, path, omit=[]): """Proceed with all children of `node`.""" for ch in node.i_children: if ch not in omit and (ch.i_config or self.doctype == "data"): self.node_handler.get(ch.keyword, self.ignore)( ch, elem, module, path)
[ "def", "process_children", "(", "self", ",", "node", ",", "elem", ",", "module", ",", "path", ",", "omit", "=", "[", "]", ")", ":", "for", "ch", "in", "node", ".", "i_children", ":", "if", "ch", "not", "in", "omit", "and", "(", "ch", ".", "i_conf...
Proceed with all children of `node`.
[ "Proceed", "with", "all", "children", "of", "node", "." ]
python
train
54.333333
indico/indico-plugins
piwik/indico_piwik/reports.py
https://github.com/indico/indico-plugins/blob/fe50085cc63be9b8161b09539e662e7b04e4b38e/piwik/indico_piwik/reports.py#L59-L74
def get(cls, *args, **kwargs): """Create and return a serializable Report object, retrieved from cache if possible""" from indico_piwik.plugin import PiwikPlugin if not PiwikPlugin.settings.get('cache_enabled'): return cls(*args, **kwargs).to_serializable() cache = GenericCache('Piwik.Report') key = u'{}-{}-{}'.format(cls.__name__, args, kwargs) report = cache.get(key) if not report: report = cls(*args, **kwargs) cache.set(key, report, PiwikPlugin.settings.get('cache_ttl')) return report.to_serializable()
[ "def", "get", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "indico_piwik", ".", "plugin", "import", "PiwikPlugin", "if", "not", "PiwikPlugin", ".", "settings", ".", "get", "(", "'cache_enabled'", ")", ":", "return", "cls", "...
Create and return a serializable Report object, retrieved from cache if possible
[ "Create", "and", "return", "a", "serializable", "Report", "object", "retrieved", "from", "cache", "if", "possible" ]
python
train
37.4375
WZBSocialScienceCenter/tmtoolkit
tmtoolkit/topicmod/model_stats.py
https://github.com/WZBSocialScienceCenter/tmtoolkit/blob/ca8b9d072e37ccc82b533f47d48bd9755722305b/tmtoolkit/topicmod/model_stats.py#L65-L75
def get_word_saliency(topic_word_distrib, doc_topic_distrib, doc_lengths): """ Calculate word saliency according to Chuang et al. 2012. saliency(w) = p(w) * distinctiveness(w) J. Chuang, C. Manning, J. Heer 2012: "Termite: Visualization Techniques for Assessing Textual Topic Models" """ p_t = get_marginal_topic_distrib(doc_topic_distrib, doc_lengths) p_w = get_marginal_word_distrib(topic_word_distrib, p_t) return p_w * get_word_distinctiveness(topic_word_distrib, p_t)
[ "def", "get_word_saliency", "(", "topic_word_distrib", ",", "doc_topic_distrib", ",", "doc_lengths", ")", ":", "p_t", "=", "get_marginal_topic_distrib", "(", "doc_topic_distrib", ",", "doc_lengths", ")", "p_w", "=", "get_marginal_word_distrib", "(", "topic_word_distrib", ...
Calculate word saliency according to Chuang et al. 2012. saliency(w) = p(w) * distinctiveness(w) J. Chuang, C. Manning, J. Heer 2012: "Termite: Visualization Techniques for Assessing Textual Topic Models"
[ "Calculate", "word", "saliency", "according", "to", "Chuang", "et", "al", ".", "2012", ".", "saliency", "(", "w", ")", "=", "p", "(", "w", ")", "*", "distinctiveness", "(", "w", ")" ]
python
train
45.090909
junzis/pyModeS
pyModeS/decoder/bds/bds44.py
https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/decoder/bds/bds44.py#L121-L138
def p44(msg): """Static pressure. Args: msg (String): 28 bytes hexadecimal message string Returns: int: static pressure in hPa """ d = hex2bin(data(msg)) if d[34] == '0': return None p = bin2int(d[35:46]) # hPa return p
[ "def", "p44", "(", "msg", ")", ":", "d", "=", "hex2bin", "(", "data", "(", "msg", ")", ")", "if", "d", "[", "34", "]", "==", "'0'", ":", "return", "None", "p", "=", "bin2int", "(", "d", "[", "35", ":", "46", "]", ")", "# hPa", "return", "p"...
Static pressure. Args: msg (String): 28 bytes hexadecimal message string Returns: int: static pressure in hPa
[ "Static", "pressure", "." ]
python
train
14.833333
YoSmudge/dnsyo
dnsyo/dnsyo.py
https://github.com/YoSmudge/dnsyo/blob/4734e36d712fefeb9a8ff22dfba678e382dde6cf/dnsyo/dnsyo.py#L352-L407
def outputStandard(self, extended=False): """ Standard, multi-line output display """ successfulResponses = len( [ True for rsp in self.results if rsp['success'] ] ) sys.stdout.write(""" - RESULTS I asked {num_servers} servers for {rec_type} records related to {domain}, {success_responses} responded with records and {error_responses} gave errors Here are the results;\n\n\n""".format( num_servers=len(self.serverList), rec_type=self.recordType, domain=self.domain, success_responses=successfulResponses, error_responses=len(self.serverList) - successfulResponses )) errors = [] for rsp in self.resultsColated: out = [] if extended: out.append("The following servers\n") out.append("\n".join([ " - {0} ({1} - {2})". format(s['ip'], s['provider'], s['country']) for s in rsp['servers']])) out.append("\nresponded with;\n") else: out.append("{num_servers} servers responded with;\n".format( num_servers=len(rsp['servers'])) ) out.append( "\n".join(rsp['results']) ) out.append("\n\n") if rsp['success']: sys.stdout.write("".join(out)) else: errors.append("".join(out)) sys.stdout.write("\n\nAnd here are the errors;\n\n\n") sys.stdout.write("".join(errors))
[ "def", "outputStandard", "(", "self", ",", "extended", "=", "False", ")", ":", "successfulResponses", "=", "len", "(", "[", "True", "for", "rsp", "in", "self", ".", "results", "if", "rsp", "[", "'success'", "]", "]", ")", "sys", ".", "stdout", ".", "...
Standard, multi-line output display
[ "Standard", "multi", "-", "line", "output", "display" ]
python
train
28.678571
cvxgrp/qcqp
qcqp/qcqp.py
https://github.com/cvxgrp/qcqp/blob/6b7a9804ad7429b72094c9a8da3b29d807037fe9/qcqp/qcqp.py#L72-L97
def solve_sdr(prob, *args, **kwargs): """Solve the SDP relaxation. """ # lifted variables and semidefinite constraint X = cvx.Semidef(prob.n + 1) W = prob.f0.homogeneous_form() rel_obj = cvx.Minimize(cvx.sum_entries(cvx.mul_elemwise(W, X))) rel_constr = [X[-1, -1] == 1] for f in prob.fs: W = f.homogeneous_form() lhs = cvx.sum_entries(cvx.mul_elemwise(W, X)) if f.relop == '==': rel_constr.append(lhs == 0) else: rel_constr.append(lhs <= 0) rel_prob = cvx.Problem(rel_obj, rel_constr) rel_prob.solve(*args, **kwargs) if rel_prob.status not in [cvx.OPTIMAL, cvx.OPTIMAL_INACCURATE]: raise Exception("Relaxation problem status: %s" % rel_prob.status) return X.value, rel_prob.value
[ "def", "solve_sdr", "(", "prob", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# lifted variables and semidefinite constraint", "X", "=", "cvx", ".", "Semidef", "(", "prob", ".", "n", "+", "1", ")", "W", "=", "prob", ".", "f0", ".", "homogeneou...
Solve the SDP relaxation.
[ "Solve", "the", "SDP", "relaxation", "." ]
python
train
29.730769
pypa/bandersnatch
src/bandersnatch_filter_plugins/regex_name.py
https://github.com/pypa/bandersnatch/blob/8b702c3bc128c5a1cbdd18890adede2f7f17fad4/src/bandersnatch_filter_plugins/regex_name.py#L72-L86
def check_match(self, name): """ Check if a release version matches any of the specificed patterns. Parameters ========== name: str Release name Returns ======= bool: True if it matches, False otherwise. """ return any(pattern.match(name) for pattern in self.patterns)
[ "def", "check_match", "(", "self", ",", "name", ")", ":", "return", "any", "(", "pattern", ".", "match", "(", "name", ")", "for", "pattern", "in", "self", ".", "patterns", ")" ]
Check if a release version matches any of the specificed patterns. Parameters ========== name: str Release name Returns ======= bool: True if it matches, False otherwise.
[ "Check", "if", "a", "release", "version", "matches", "any", "of", "the", "specificed", "patterns", "." ]
python
train
24
juanifioren/django-oidc-provider
oidc_provider/lib/utils/oauth2.py
https://github.com/juanifioren/django-oidc-provider/blob/f0daed07b2ac7608565b80d4c80ccf04d8c416a8/oidc_provider/lib/utils/oauth2.py#L56-L93
def protected_resource_view(scopes=None): """ View decorator. The client accesses protected resources by presenting the access token to the resource server. https://tools.ietf.org/html/rfc6749#section-7 """ if scopes is None: scopes = [] def wrapper(view): def view_wrapper(request, *args, **kwargs): access_token = extract_access_token(request) try: try: kwargs['token'] = Token.objects.get(access_token=access_token) except Token.DoesNotExist: logger.debug('[UserInfo] Token does not exist: %s', access_token) raise BearerTokenError('invalid_token') if kwargs['token'].has_expired(): logger.debug('[UserInfo] Token has expired: %s', access_token) raise BearerTokenError('invalid_token') if not set(scopes).issubset(set(kwargs['token'].scope)): logger.debug('[UserInfo] Missing openid scope.') raise BearerTokenError('insufficient_scope') except BearerTokenError as error: response = HttpResponse(status=error.status) response['WWW-Authenticate'] = 'error="{0}", error_description="{1}"'.format( error.code, error.description) return response return view(request, *args, **kwargs) return view_wrapper return wrapper
[ "def", "protected_resource_view", "(", "scopes", "=", "None", ")", ":", "if", "scopes", "is", "None", ":", "scopes", "=", "[", "]", "def", "wrapper", "(", "view", ")", ":", "def", "view_wrapper", "(", "request", ",", "*", "args", ",", "*", "*", "kwar...
View decorator. The client accesses protected resources by presenting the access token to the resource server. https://tools.ietf.org/html/rfc6749#section-7
[ "View", "decorator", ".", "The", "client", "accesses", "protected", "resources", "by", "presenting", "the", "access", "token", "to", "the", "resource", "server", ".", "https", ":", "//", "tools", ".", "ietf", ".", "org", "/", "html", "/", "rfc6749#section", ...
python
train
38.526316
jgorset/django-respite
respite/formats.py
https://github.com/jgorset/django-respite/blob/719469d11baf91d05917bab1623bd82adc543546/respite/formats.py#L57-L67
def find_by_name(name): """ Find and return a format by name. :param name: A string describing the name of the format. """ for format in FORMATS: if name == format.name: return format raise UnknownFormat('No format found with name "%s"' % name)
[ "def", "find_by_name", "(", "name", ")", ":", "for", "format", "in", "FORMATS", ":", "if", "name", "==", "format", ".", "name", ":", "return", "format", "raise", "UnknownFormat", "(", "'No format found with name \"%s\"'", "%", "name", ")" ]
Find and return a format by name. :param name: A string describing the name of the format.
[ "Find", "and", "return", "a", "format", "by", "name", "." ]
python
train
25.454545
saltstack/salt
salt/modules/nova.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nova.py#L659-L678
def image_meta_set(image_id=None, name=None, profile=None, **kwargs): # pylint: disable=C0103 ''' Sets a key=value pair in the metadata for an image (nova image-meta set) CLI Examples: .. code-block:: bash salt '*' nova.image_meta_set 6f52b2ff-0b31-4d84-8fd1-af45b84824f6 cheese=gruyere salt '*' nova.image_meta_set name=myimage salad=pasta beans=baked ''' conn = _auth(profile, **kwargs) return conn.image_meta_set( image_id, name, **kwargs )
[ "def", "image_meta_set", "(", "image_id", "=", "None", ",", "name", "=", "None", ",", "profile", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=C0103", "conn", "=", "_auth", "(", "profile", ",", "*", "*", "kwargs", ")", "return", "c...
Sets a key=value pair in the metadata for an image (nova image-meta set) CLI Examples: .. code-block:: bash salt '*' nova.image_meta_set 6f52b2ff-0b31-4d84-8fd1-af45b84824f6 cheese=gruyere salt '*' nova.image_meta_set name=myimage salad=pasta beans=baked
[ "Sets", "a", "key", "=", "value", "pair", "in", "the", "metadata", "for", "an", "image", "(", "nova", "image", "-", "meta", "set", ")" ]
python
train
27.85
IdentityPython/pysaml2
src/saml2/sigver.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/sigver.py#L965-L987
def validate_signature(self, signedtext, cert_file, cert_type, node_name, node_id, id_attr): """ Validate signature on XML document. The parameters actually used in this CryptoBackend implementation are : :param signedtext: The signed XML data as string :param cert_file: xmlsec key_spec string(), filename, 'pkcs11://' URI or PEM data :param cert_type: string, must be 'pem' for now :returns: True on successful validation, False otherwise """ if cert_type != 'pem': raise Unsupported('Only PEM certs supported here') import xmlsec xml = xmlsec.parse_xml(signedtext) try: return xmlsec.verify(xml, cert_file) except xmlsec.XMLSigException: return False
[ "def", "validate_signature", "(", "self", ",", "signedtext", ",", "cert_file", ",", "cert_type", ",", "node_name", ",", "node_id", ",", "id_attr", ")", ":", "if", "cert_type", "!=", "'pem'", ":", "raise", "Unsupported", "(", "'Only PEM certs supported here'", ")...
Validate signature on XML document. The parameters actually used in this CryptoBackend implementation are : :param signedtext: The signed XML data as string :param cert_file: xmlsec key_spec string(), filename, 'pkcs11://' URI or PEM data :param cert_type: string, must be 'pem' for now :returns: True on successful validation, False otherwise
[ "Validate", "signature", "on", "XML", "document", "." ]
python
train
34.565217
happyleavesaoc/python-limitlessled
limitlessled/bridge.py
https://github.com/happyleavesaoc/python-limitlessled/blob/70307c2bf8c91430a99579d2ad18b228ec7a8488/limitlessled/bridge.py#L175-L225
def _consume(self): """ Consume commands from the queue. The command is repeated according to the configured value. Wait after each command is sent. The bridge socket is a shared resource. It must only be used by one thread at a time. Note that this can and will delay commands if multiple groups are attempting to communicate at the same time on the same bridge. """ while not self.is_closed: # Get command from queue. msg = self._command_queue.get() # Closed if msg is None: return # Use the lock so we are sure is_ready is not changed during execution # and the socket is not in use with self._lock: # Check if bridge is ready if self.is_ready: (command, reps, wait) = msg # Select group if a different group is currently selected. if command.select and self._selected_number != command.group_number: if self._send_raw(command.select_command.get_bytes(self)): self._selected_number = command.group_number time.sleep(SELECT_WAIT) else: # Stop sending on socket error self.is_ready = False # Repeat command as necessary. for _ in range(reps): if self.is_ready: if self._send_raw(command.get_bytes(self)): time.sleep(wait) else: # Stop sending on socket error self.is_ready = False # Wait if bridge is not ready, we're only reading is_ready, no lock needed if not self.is_ready and not self.is_closed: # For older bridges, always try again, there's no keep-alive thread if self.version < 6: # Give the reconnect some time time.sleep(RECONNECT_TIME) self.is_ready = True
[ "def", "_consume", "(", "self", ")", ":", "while", "not", "self", ".", "is_closed", ":", "# Get command from queue.", "msg", "=", "self", ".", "_command_queue", ".", "get", "(", ")", "# Closed", "if", "msg", "is", "None", ":", "return", "# Use the lock so we...
Consume commands from the queue. The command is repeated according to the configured value. Wait after each command is sent. The bridge socket is a shared resource. It must only be used by one thread at a time. Note that this can and will delay commands if multiple groups are attempting to communicate at the same time on the same bridge.
[ "Consume", "commands", "from", "the", "queue", "." ]
python
train
42.784314
quantmind/dynts
dynts/utils/numbers.py
https://github.com/quantmind/dynts/blob/21ac57c648bfec402fa6b1fe569496cf098fb5e8/dynts/utils/numbers.py#L10-L22
def isnumeric(obj): ''' Return true if obj is a numeric value ''' from decimal import Decimal if type(obj) == Decimal: return True else: try: float(obj) except: return False return True
[ "def", "isnumeric", "(", "obj", ")", ":", "from", "decimal", "import", "Decimal", "if", "type", "(", "obj", ")", "==", "Decimal", ":", "return", "True", "else", ":", "try", ":", "float", "(", "obj", ")", "except", ":", "return", "False", "return", "T...
Return true if obj is a numeric value
[ "Return", "true", "if", "obj", "is", "a", "numeric", "value" ]
python
train
20.384615
vladimarius/pyap
pyap/parser.py
https://github.com/vladimarius/pyap/blob/7896b5293982a30c1443e0c81c1ca32eeb8db15c/pyap/parser.py#L81-L103
def _combine_results(self, match_as_dict): '''Combine results from different parsed parts: we look for non-empty results in values like 'postal_code_b' or 'postal_code_c' and store them as main value. So 'postal_code_b':'123456' becomes: 'postal_code' :'123456' ''' keys = [] vals = [] for k, v in six.iteritems(match_as_dict): if k[-2:] in '_a_b_c_d_e_f_g_h_i_j_k_l_m': if v: # strip last 2 chars: '..._b' -> '...' keys.append(k[:-2]) vals.append(v) else: if k not in keys: keys.append(k) vals.append(v) return dict(zip(keys, vals))
[ "def", "_combine_results", "(", "self", ",", "match_as_dict", ")", ":", "keys", "=", "[", "]", "vals", "=", "[", "]", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "match_as_dict", ")", ":", "if", "k", "[", "-", "2", ":", "]", "in", ...
Combine results from different parsed parts: we look for non-empty results in values like 'postal_code_b' or 'postal_code_c' and store them as main value. So 'postal_code_b':'123456' becomes: 'postal_code' :'123456'
[ "Combine", "results", "from", "different", "parsed", "parts", ":", "we", "look", "for", "non", "-", "empty", "results", "in", "values", "like", "postal_code_b", "or", "postal_code_c", "and", "store", "them", "as", "main", "value", "." ]
python
train
37.217391
nicolargo/glances
glances/plugins/glances_help.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_help.py#L55-L103
def generate_view_data(self): """Generate the views.""" self.view_data['version'] = '{} {}'.format('Glances', __version__) self.view_data['psutil_version'] = ' with psutil {}'.format(psutil_version) try: self.view_data['configuration_file'] = 'Configuration file: {}'.format(self.config.loaded_config_file) except AttributeError: pass msg_col = ' {0:1} {1:35}' msg_col2 = ' {0:1} {1:35}' self.view_data['sort_auto'] = msg_col.format('a', 'Sort processes automatically') self.view_data['sort_network'] = msg_col2.format('b', 'Bytes or bits for network I/O') self.view_data['sort_cpu'] = msg_col.format('c', 'Sort processes by CPU%') self.view_data['show_hide_alert'] = msg_col2.format('l', 'Show/hide alert logs') self.view_data['sort_mem'] = msg_col.format('m', 'Sort processes by MEM%') self.view_data['sort_user'] = msg_col.format('u', 'Sort processes by USER') self.view_data['delete_warning_alerts'] = msg_col2.format('w', 'Delete warning alerts') self.view_data['sort_proc'] = msg_col.format('p', 'Sort processes by name') self.view_data['delete_warning_critical_alerts'] = msg_col2.format('x', 'Delete warning and critical alerts') self.view_data['sort_io'] = msg_col.format('i', 'Sort processes by I/O rate') self.view_data['percpu'] = msg_col2.format('1', 'Global CPU or per-CPU stats') self.view_data['sort_cpu_times'] = msg_col.format('t', 'Sort processes by TIME') self.view_data['show_hide_help'] = msg_col2.format('h', 'Show/hide this help screen') self.view_data['show_hide_diskio'] = msg_col.format('d', 'Show/hide disk I/O stats') self.view_data['show_hide_irq'] = msg_col2.format('Q', 'Show/hide IRQ stats') self.view_data['view_network_io_combination'] = msg_col2.format('T', 'View network I/O as combination') self.view_data['show_hide_filesystem'] = msg_col.format('f', 'Show/hide filesystem stats') self.view_data['view_cumulative_network'] = msg_col2.format('U', 'View cumulative network I/O') self.view_data['show_hide_network'] = msg_col.format('n', 'Show/hide network 
stats') self.view_data['show_hide_filesytem_freespace'] = msg_col2.format('F', 'Show filesystem free space') self.view_data['show_hide_sensors'] = msg_col.format('s', 'Show/hide sensors stats') self.view_data['generate_graphs'] = msg_col2.format('g', 'Generate graphs for current history') self.view_data['show_hide_left_sidebar'] = msg_col.format('2', 'Show/hide left sidebar') self.view_data['reset_history'] = msg_col2.format('r', 'Reset history') self.view_data['enable_disable_process_stats'] = msg_col.format('z', 'Enable/disable processes stats') self.view_data['quit'] = msg_col2.format('q', 'Quit (Esc and Ctrl-C also work)') self.view_data['enable_disable_top_extends_stats'] = msg_col.format('e', 'Enable/disable top extended stats') self.view_data['enable_disable_short_processname'] = msg_col.format('/', 'Enable/disable short processes name') self.view_data['enable_disable_irix'] = msg_col.format('0', 'Enable/disable Irix process CPU') self.view_data['enable_disable_docker'] = msg_col2.format('D', 'Enable/disable Docker stats') self.view_data['enable_disable_quick_look'] = msg_col.format('3', 'Enable/disable quick look plugin') self.view_data['show_hide_ip'] = msg_col2.format('I', 'Show/hide IP module') self.view_data['diskio_iops'] = msg_col2.format('B', 'Count/rate for Disk I/O') self.view_data['show_hide_top_menu'] = msg_col2.format('5', 'Show/hide top menu (QL, CPU, MEM, SWAP and LOAD)') self.view_data['enable_disable_gpu'] = msg_col.format('G', 'Enable/disable gpu plugin') self.view_data['enable_disable_mean_gpu'] = msg_col2.format('6', 'Enable/disable mean gpu') self.view_data['edit_pattern_filter'] = 'ENTER: Edit the process filter pattern'
[ "def", "generate_view_data", "(", "self", ")", ":", "self", ".", "view_data", "[", "'version'", "]", "=", "'{} {}'", ".", "format", "(", "'Glances'", ",", "__version__", ")", "self", ".", "view_data", "[", "'psutil_version'", "]", "=", "' with psutil {}'", "...
Generate the views.
[ "Generate", "the", "views", "." ]
python
train
81.714286
genialis/resolwe-runtime-utils
resolwe_runtime_utils.py
https://github.com/genialis/resolwe-runtime-utils/blob/5657d7cf981972a5259b9b475eae220479401001/resolwe_runtime_utils.py#L325-L530
def import_file( src, file_name, imported_format=ImportedFormat.BOTH, progress_from=0.0, progress_to=None, ): """Import file to working directory. :param src: Source file path or URL :param file_name: Source file name :param imported_format: Import file format (extracted, compressed or both) :param progress_from: Initial progress value :param progress_to: Final progress value :return: Destination file path (if extracted and compressed, extracted path given) """ if progress_to is not None: if not isinstance(progress_from, float) or not isinstance(progress_to, float): raise ValueError("Progress_from and progress_to must be float") if progress_from < 0 or progress_from > 1: raise ValueError("Progress_from must be between 0 and 1") if progress_to < 0 or progress_to > 1: raise ValueError("Progress_to must be between 0 and 1") if progress_from >= progress_to: raise ValueError("Progress_to must be higher than progress_from") print("Importing and compressing {}...".format(file_name)) def importGz(): """Import gzipped file. The file_name must have .gz extension. """ if imported_format != ImportedFormat.COMPRESSED: # Extracted file required with open(file_name[:-3], 'wb') as f_out, gzip.open(src, 'rb') as f_in: try: shutil.copyfileobj(f_in, f_out, CHUNK_SIZE) except zlib.error: raise ValueError("Invalid gzip file format: {}".format(file_name)) else: # Extracted file not-required # Verify the compressed file. with gzip.open(src, 'rb') as f: try: while f.read(CHUNK_SIZE) != b'': pass except zlib.error: raise ValueError("Invalid gzip file format: {}".format(file_name)) if imported_format != ImportedFormat.EXTRACTED: # Compressed file required try: shutil.copyfile(src, file_name) except shutil.SameFileError: pass # Skip copy of downloaded files if imported_format == ImportedFormat.COMPRESSED: return file_name else: return file_name[:-3] def import7z(): """Import compressed file in various formats. Supported extensions: .bz2, .zip, .rar, .7z, .tar.gz, and .tar.bz2. 
""" extracted_name, _ = os.path.splitext(file_name) destination_name = extracted_name temp_dir = 'temp_{}'.format(extracted_name) cmd = '7z x -y -o{} {}'.format(shlex.quote(temp_dir), shlex.quote(src)) try: subprocess.check_call(cmd, shell=True) except subprocess.CalledProcessError as err: if err.returncode == 2: raise ValueError("Failed to extract file: {}".format(file_name)) else: raise paths = os.listdir(temp_dir) if len(paths) == 1 and os.path.isfile(os.path.join(temp_dir, paths[0])): # Single file in archive. temp_file = os.path.join(temp_dir, paths[0]) if imported_format != ImportedFormat.EXTRACTED: # Compressed file required with open(temp_file, 'rb') as f_in, gzip.open( extracted_name + '.gz', 'wb' ) as f_out: shutil.copyfileobj(f_in, f_out, CHUNK_SIZE) if imported_format != ImportedFormat.COMPRESSED: # Extracted file required shutil.move(temp_file, './{}'.format(extracted_name)) if extracted_name.endswith('.tar'): with tarfile.open(extracted_name) as tar: tar.extractall() os.remove(extracted_name) destination_name, _ = os.path.splitext(extracted_name) else: destination_name = extracted_name + '.gz' else: # Directory or several files in archive. 
if imported_format != ImportedFormat.EXTRACTED: # Compressed file required with tarfile.open(extracted_name + '.tar.gz', 'w:gz') as tar: for fname in glob.glob(os.path.join(temp_dir, '*')): tar.add(fname, os.path.basename(fname)) if imported_format != ImportedFormat.COMPRESSED: # Extracted file required for path in os.listdir(temp_dir): shutil.move(os.path.join(temp_dir, path), './{}'.format(path)) else: destination_name = extracted_name + '.tar.gz' shutil.rmtree(temp_dir) return destination_name def importUncompressed(): """Import uncompressed file.""" if imported_format != ImportedFormat.EXTRACTED: # Compressed file required with open(src, 'rb') as f_in, gzip.open(file_name + '.gz', 'wb') as f_out: shutil.copyfileobj(f_in, f_out, CHUNK_SIZE) if imported_format != ImportedFormat.COMPRESSED: # Extracted file required try: shutil.copyfile(src, file_name) except shutil.SameFileError: pass # Skip copy of downloaded files return ( file_name + '.gz' if imported_format == ImportedFormat.COMPRESSED else file_name ) # Large file download from Google Drive requires cookie and token. 
try: response = None if re.match( r'^https://drive.google.com/[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]$', src, ): session = requests.Session() response = session.get(src, stream=True) token = None for key, value in response.cookies.items(): if key.startswith('download_warning'): token = value break if token is not None: params = {'confirm': token} response = session.get(src, params=params, stream=True) elif re.match( r'^(https?|ftp)://[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]$', src, ): response = requests.get(src, stream=True) except requests.exceptions.ConnectionError: raise requests.exceptions.ConnectionError("Could not connect to {}".format(src)) if response: with open(file_name, 'wb') as f: total = response.headers.get('content-length') total = float(total) if total else None downloaded = 0 current_progress = 0 for content in response.iter_content(chunk_size=CHUNK_SIZE): f.write(content) if total is not None and progress_to is not None: downloaded += len(content) progress_span = progress_to - progress_from next_progress = progress_from + progress_span * downloaded / total next_progress = round(next_progress, 2) if next_progress > current_progress: print(progress(next_progress)) current_progress = next_progress # Check if a temporary file exists. if not os.path.isfile(file_name): raise ValueError("Downloaded file not found {}".format(file_name)) src = file_name else: if not os.path.isfile(src): raise ValueError("Source file not found {}".format(src)) # Decide which import should be used. if re.search(r'\.(bz2|zip|rar|7z|tgz|tar\.gz|tar\.bz2)$', file_name): destination_file_name = import7z() elif file_name.endswith('.gz'): destination_file_name = importGz() else: destination_file_name = importUncompressed() if progress_to is not None: print(progress(progress_to)) return destination_file_name
[ "def", "import_file", "(", "src", ",", "file_name", ",", "imported_format", "=", "ImportedFormat", ".", "BOTH", ",", "progress_from", "=", "0.0", ",", "progress_to", "=", "None", ",", ")", ":", "if", "progress_to", "is", "not", "None", ":", "if", "not", ...
Import file to working directory. :param src: Source file path or URL :param file_name: Source file name :param imported_format: Import file format (extracted, compressed or both) :param progress_from: Initial progress value :param progress_to: Final progress value :return: Destination file path (if extracted and compressed, extracted path given)
[ "Import", "file", "to", "working", "directory", "." ]
python
train
38.058252
wonambi-python/wonambi
wonambi/widgets/notes.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/notes.py#L844-L876
def go_to_marker(self, row, col, table_type): """Move to point in time marked by the marker. Parameters ---------- row : QtCore.int column : QtCore.int table_type : str 'dataset' table or 'annot' table, it works on either """ if table_type == 'dataset': marker_time = self.idx_marker.property('start')[row] marker_end_time = self.idx_marker.property('end')[row] else: marker_time = self.idx_annot_list.property('start')[row] marker_end_time = self.idx_annot_list.property('end')[row] window_length = self.parent.value('window_length') if self.parent.traces.action['centre_event'].isChecked(): window_start = (marker_time + marker_end_time - window_length) / 2 else: window_start = floor(marker_time / window_length) * window_length self.parent.overview.update_position(window_start) if table_type == 'annot': for annot in self.parent.traces.idx_annot: if annot.marker.x() == marker_time: self.parent.traces.highlight_event(annot) break
[ "def", "go_to_marker", "(", "self", ",", "row", ",", "col", ",", "table_type", ")", ":", "if", "table_type", "==", "'dataset'", ":", "marker_time", "=", "self", ".", "idx_marker", ".", "property", "(", "'start'", ")", "[", "row", "]", "marker_end_time", ...
Move to point in time marked by the marker. Parameters ---------- row : QtCore.int column : QtCore.int table_type : str 'dataset' table or 'annot' table, it works on either
[ "Move", "to", "point", "in", "time", "marked", "by", "the", "marker", "." ]
python
train
36.424242
PyCQA/astroid
astroid/node_classes.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/node_classes.py#L2294-L2306
def postinit(self, left=None, ops=None): """Do some setup after initialisation. :param left: The value at the left being applied to a comparison operator. :type left: NodeNG or None :param ops: The remainder of the operators and their relevant right hand value. :type ops: list(tuple(str, NodeNG)) or None """ self.left = left self.ops = ops
[ "def", "postinit", "(", "self", ",", "left", "=", "None", ",", "ops", "=", "None", ")", ":", "self", ".", "left", "=", "left", "self", ".", "ops", "=", "ops" ]
Do some setup after initialisation. :param left: The value at the left being applied to a comparison operator. :type left: NodeNG or None :param ops: The remainder of the operators and their relevant right hand value. :type ops: list(tuple(str, NodeNG)) or None
[ "Do", "some", "setup", "after", "initialisation", "." ]
python
train
32.230769
internetarchive/brozzler
brozzler/model.py
https://github.com/internetarchive/brozzler/blob/411b3f266a38b9bb942021c0121ebd8e5ca66447/brozzler/model.py#L266-L316
def accept_reject_or_neither(self, url, parent_page=None): ''' Returns `True` (accepted), `False` (rejected), or `None` (no decision). `None` usually means rejected, unless `max_hops_off` comes into play. ''' if not isinstance(url, urlcanon.ParsedUrl): url = urlcanon.semantic(url) if not url.scheme in (b'http', b'https'): # XXX doesn't belong here maybe (where? worker ignores unknown # schemes?) return False try_parent_urls = [] if parent_page: try_parent_urls.append(urlcanon.semantic(parent_page.url)) if parent_page.redirect_url: try_parent_urls.append( urlcanon.semantic(parent_page.redirect_url)) # enforce max_hops if (parent_page and "max_hops" in self.scope and parent_page.hops_from_seed >= self.scope["max_hops"]): return False # enforce reject rules if "blocks" in self.scope: for block_rule in self.scope["blocks"]: rule = urlcanon.MatchRule(**block_rule) if try_parent_urls: for parent_url in try_parent_urls: if rule.applies(url, parent_url): return False else: if rule.applies(url): return False # honor accept rules for accept_rule in self.scope["accepts"]: rule = urlcanon.MatchRule(**accept_rule) if try_parent_urls: for parent_url in try_parent_urls: if rule.applies(url, parent_url): return True else: if rule.applies(url): return True # no decision if we reach here return None
[ "def", "accept_reject_or_neither", "(", "self", ",", "url", ",", "parent_page", "=", "None", ")", ":", "if", "not", "isinstance", "(", "url", ",", "urlcanon", ".", "ParsedUrl", ")", ":", "url", "=", "urlcanon", ".", "semantic", "(", "url", ")", "if", "...
Returns `True` (accepted), `False` (rejected), or `None` (no decision). `None` usually means rejected, unless `max_hops_off` comes into play.
[ "Returns", "True", "(", "accepted", ")", "False", "(", "rejected", ")", "or", "None", "(", "no", "decision", ")", "." ]
python
train
35.941176
StyXman/ayrton
ayrton/parser/error.py
https://github.com/StyXman/ayrton/blob/e1eed5c7ef230e3c2340a1f0bf44c72bbdc0debb/ayrton/parser/error.py#L166-L241
def normalize_exception(self, space): """Normalize the OperationError. In other words, fix w_type and/or w_value to make sure that the __class__ of w_value is exactly w_type. """ # # This method covers all ways in which the Python statement # "raise X, Y" can produce a valid exception type and instance. # # In the following table, 'Class' means a subclass of BaseException # and 'inst' is an instance of either 'Class' or a subclass of it. # # The flow object space only deals with non-advanced case. # # input (w_type, w_value)... becomes... advanced case? # --------------------------------------------------------------------- # (Class, None) (Class, Class()) no # (Class, inst) (inst.__class__, inst) no # (Class, tuple) (Class, Class(*tuple)) yes # (Class, x) (Class, Class(x)) no # (inst, None) (inst.__class__, inst) no # w_type = self.w_type w_value = self.get_w_value(space) if space.exception_is_valid_obj_as_class_w(w_type): # this is for all cases of the form (Class, something) if space.is_w(w_value, space.w_None): # raise Type: we assume we have to instantiate Type w_value = space.call_function(w_type) w_type = self._exception_getclass(space, w_value) else: w_valuetype = space.exception_getclass(w_value) if space.exception_issubclass_w(w_valuetype, w_type): # raise Type, Instance: let etype be the exact type of value w_type = w_valuetype else: if space.isinstance_w(w_value, space.w_tuple): # raise Type, tuple: assume the tuple contains the # constructor args w_value = space.call(w_type, w_value) else: # raise Type, X: assume X is the constructor argument w_value = space.call_function(w_type, w_value) w_type = self._exception_getclass(space, w_value) if self.w_cause: # ensure w_cause is of a valid type if space.is_none(self.w_cause): pass else: self._exception_getclass(space, self.w_cause, "exception causes") space.setattr(w_value, space.wrap("__cause__"), self.w_cause) if self._application_traceback: from pypy.interpreter.pytraceback import PyTraceback from 
pypy.module.exceptions.interp_exceptions import W_BaseException tb = self._application_traceback if (isinstance(w_value, W_BaseException) and isinstance(tb, PyTraceback)): # traceback hasn't escaped yet w_value.w_traceback = tb else: # traceback has escaped space.setattr(w_value, space.wrap("__traceback__"), space.wrap(self.get_traceback())) else: # the only case left here is (inst, None), from a 'raise inst'. w_inst = w_type w_instclass = self._exception_getclass(space, w_inst) if not space.is_w(w_value, space.w_None): raise OperationError(space.w_TypeError, space.wrap("instance exception may not " "have a separate value")) w_value = w_inst w_type = w_instclass self.w_type = w_type self._w_value = w_value
[ "def", "normalize_exception", "(", "self", ",", "space", ")", ":", "#", "# This method covers all ways in which the Python statement", "# \"raise X, Y\" can produce a valid exception type and instance.", "#", "# In the following table, 'Class' means a subclass of BaseException", "# and 'in...
Normalize the OperationError. In other words, fix w_type and/or w_value to make sure that the __class__ of w_value is exactly w_type.
[ "Normalize", "the", "OperationError", ".", "In", "other", "words", "fix", "w_type", "and", "/", "or", "w_value", "to", "make", "sure", "that", "the", "__class__", "of", "w_value", "is", "exactly", "w_type", "." ]
python
train
50.986842
coin-or/GiMPy
src/gimpy/graph.py
https://github.com/coin-or/GiMPy/blob/51853122a50eb6019d06bbdedbfc396a833b5a22/src/gimpy/graph.py#L1484-L1524
def cycle_canceling(self, display): ''' API: cycle_canceling(self, display) Description: Solves minimum cost feasible flow problem using cycle canceling algorithm. Returns True when an optimal solution is found, returns False otherwise. 'flow' attribute values of arcs should be considered as junk when returned False. Input: display: Display method. Pre: (1) Arcs should have 'capacity' and 'cost' attribute. (2) Nodes should have 'demand' attribute, this value should be positive if the node is a supply node, negative if it is demand node and 0 if it is transhipment node. (3) graph should not have node 's' and 't'. Post: Changes 'flow' attributes of arcs. Return: Returns True when an optimal solution is found, returns False otherwise. ''' # find a feasible solution to flow problem if not self.find_feasible_flow(): return False # create residual graph residual_g = self.create_residual_graph() # identify a negative cycle in residual graph ncycle = residual_g.get_negative_cycle() # loop while residual graph has a negative cycle while ncycle is not None: # find capacity of cycle cap = residual_g.find_cycle_capacity(ncycle) # augment capacity amount along the cycle self.augment_cycle(cap, ncycle) # create residual graph residual_g = self.create_residual_graph() # identify next negative cycle ncycle = residual_g.get_negative_cycle() return True
[ "def", "cycle_canceling", "(", "self", ",", "display", ")", ":", "# find a feasible solution to flow problem", "if", "not", "self", ".", "find_feasible_flow", "(", ")", ":", "return", "False", "# create residual graph", "residual_g", "=", "self", ".", "create_residual...
API: cycle_canceling(self, display) Description: Solves minimum cost feasible flow problem using cycle canceling algorithm. Returns True when an optimal solution is found, returns False otherwise. 'flow' attribute values of arcs should be considered as junk when returned False. Input: display: Display method. Pre: (1) Arcs should have 'capacity' and 'cost' attribute. (2) Nodes should have 'demand' attribute, this value should be positive if the node is a supply node, negative if it is demand node and 0 if it is transhipment node. (3) graph should not have node 's' and 't'. Post: Changes 'flow' attributes of arcs. Return: Returns True when an optimal solution is found, returns False otherwise.
[ "API", ":", "cycle_canceling", "(", "self", "display", ")", "Description", ":", "Solves", "minimum", "cost", "feasible", "flow", "problem", "using", "cycle", "canceling", "algorithm", ".", "Returns", "True", "when", "an", "optimal", "solution", "is", "found", ...
python
train
42.170732
bolt-project/bolt
bolt/spark/chunk.py
https://github.com/bolt-project/bolt/blob/9cd7104aa085498da3097b72696184b9d3651c51/bolt/spark/chunk.py#L146-L200
def unchunk(self): """ Convert a chunked array back into a full array with (key,value) pairs where key is a tuple of indices, and value is an ndarray. """ plan, padding, vshape, split = self.plan, self.padding, self.vshape, self.split nchunks = self.getnumber(plan, vshape) full_shape = concatenate((nchunks, plan)) n = len(vshape) perm = concatenate(list(zip(range(n), range(n, 2*n)))) if self.uniform: def _unchunk(it): ordered = sorted(it, key=lambda kv: kv[0][split:]) keys, values = zip(*ordered) yield keys[0][:split], asarray(values).reshape(full_shape).transpose(perm).reshape(vshape) else: def _unchunk(it): ordered = sorted(it, key=lambda kv: kv[0][split:]) keys, values = zip(*ordered) k_chks = [k[split:] for k in keys] arr = empty(nchunks, dtype='object') for (i, d) in zip(k_chks, values): arr[i] = d yield keys[0][:split], allstack(arr.tolist()) # remove padding if self.padded: removepad = self.removepad rdd = self._rdd.map(lambda kv: (kv[0], removepad(kv[0][split:], kv[1], nchunks, padding, axes=range(n)))) else: rdd = self._rdd # skip partitionBy if there is not actually any chunking if array_equal(self.plan, self.vshape): rdd = rdd.map(lambda kv: (kv[0][:split], kv[1])) ordered = self._ordered else: ranges = self.kshape npartitions = int(prod(ranges)) if len(self.kshape) == 0: partitioner = lambda k: 0 else: partitioner = lambda k: ravel_multi_index(k[:split], ranges) rdd = rdd.partitionBy(numPartitions=npartitions, partitionFunc=partitioner).mapPartitions(_unchunk) ordered = True if array_equal(self.vshape, [1]): rdd = rdd.mapValues(lambda v: squeeze(v)) newshape = self.shape[:-1] else: newshape = self.shape return BoltArraySpark(rdd, shape=newshape, split=self._split, dtype=self.dtype, ordered=ordered)
[ "def", "unchunk", "(", "self", ")", ":", "plan", ",", "padding", ",", "vshape", ",", "split", "=", "self", ".", "plan", ",", "self", ".", "padding", ",", "self", ".", "vshape", ",", "self", ".", "split", "nchunks", "=", "self", ".", "getnumber", "(...
Convert a chunked array back into a full array with (key,value) pairs where key is a tuple of indices, and value is an ndarray.
[ "Convert", "a", "chunked", "array", "back", "into", "a", "full", "array", "with", "(", "key", "value", ")", "pairs", "where", "key", "is", "a", "tuple", "of", "indices", "and", "value", "is", "an", "ndarray", "." ]
python
test
41.2
mikedh/trimesh
trimesh/primitives.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/primitives.py#L448-L458
def moment_inertia(self): """ The analytic inertia tensor of the sphere primitive. Returns ---------- tensor: (3,3) float, 3D inertia tensor """ tensor = inertia.sphere_inertia(mass=self.volume, radius=self.primitive.radius) return tensor
[ "def", "moment_inertia", "(", "self", ")", ":", "tensor", "=", "inertia", ".", "sphere_inertia", "(", "mass", "=", "self", ".", "volume", ",", "radius", "=", "self", ".", "primitive", ".", "radius", ")", "return", "tensor" ]
The analytic inertia tensor of the sphere primitive. Returns ---------- tensor: (3,3) float, 3D inertia tensor
[ "The", "analytic", "inertia", "tensor", "of", "the", "sphere", "primitive", "." ]
python
train
30.272727
treycucco/pyebnf
pyebnf/primitive.py
https://github.com/treycucco/pyebnf/blob/3634ddabbe5d73508bcc20f4a591f86a46634e1d/pyebnf/primitive.py#L87-L97
def add_ignored(self, ignored): """Add ignored text to the node. This will add the length of the ignored text to the node's consumed property. """ if ignored: if self.ignored: self.ignored = ignored + self.ignored else: self.ignored = ignored self.consumed += len(ignored)
[ "def", "add_ignored", "(", "self", ",", "ignored", ")", ":", "if", "ignored", ":", "if", "self", ".", "ignored", ":", "self", ".", "ignored", "=", "ignored", "+", "self", ".", "ignored", "else", ":", "self", ".", "ignored", "=", "ignored", "self", "....
Add ignored text to the node. This will add the length of the ignored text to the node's consumed property.
[ "Add", "ignored", "text", "to", "the", "node", ".", "This", "will", "add", "the", "length", "of", "the", "ignored", "text", "to", "the", "node", "s", "consumed", "property", "." ]
python
test
28.272727
Ex-Mente/auxi.0
auxi/core/time.py
https://github.com/Ex-Mente/auxi.0/blob/2dcdae74154f136f8ca58289fe5b20772f215046/auxi/core/time.py#L79-L103
def get_datetime_at_period_ix(self, ix): """ Get the datetime at a given period. :param period: The index of the period. :returns: The datetime. """ if self.timestep_period_duration == TimePeriod.millisecond: return self.start_datetime + timedelta(milliseconds=ix) elif self.timestep_period_duration == TimePeriod.second: return self.start_datetime + timedelta(seconds=ix) elif self.timestep_period_duration == TimePeriod.minute: return self.start_datetime + timedelta(minutes=ix) elif self.timestep_period_duration == TimePeriod.hour: return self.start_datetime + timedelta(hours=ix) elif self.timestep_period_duration == TimePeriod.day: return self.start_datetime + relativedelta(days=ix) elif self.timestep_period_duration == TimePeriod.week: return self.start_datetime + relativedelta(days=ix*7) elif self.timestep_period_duration == TimePeriod.month: return self.start_datetime + relativedelta(months=ix) elif self.timestep_period_duration == TimePeriod.year: return self.start_datetime + relativedelta(years=ix)
[ "def", "get_datetime_at_period_ix", "(", "self", ",", "ix", ")", ":", "if", "self", ".", "timestep_period_duration", "==", "TimePeriod", ".", "millisecond", ":", "return", "self", ".", "start_datetime", "+", "timedelta", "(", "milliseconds", "=", "ix", ")", "e...
Get the datetime at a given period. :param period: The index of the period. :returns: The datetime.
[ "Get", "the", "datetime", "at", "a", "given", "period", "." ]
python
valid
47.84
mamrhein/specification
specification/_extd_ast_expr.py
https://github.com/mamrhein/specification/blob/a4c09a0d286cda7a04e8a189f12e23edd97f64ea/specification/_extd_ast_expr.py#L527-L530
def visit_GeneratorExp(self, node: AST, dfltChaining: bool = True) -> str: """Return `node`s representation as generator expression.""" return f"({self.visit(node.elt)} " \ f"{' '.join(self.visit(gen) for gen in node.generators)})"
[ "def", "visit_GeneratorExp", "(", "self", ",", "node", ":", "AST", ",", "dfltChaining", ":", "bool", "=", "True", ")", "->", "str", ":", "return", "f\"({self.visit(node.elt)} \"", "f\"{' '.join(self.visit(gen) for gen in node.generators)})\"" ]
Return `node`s representation as generator expression.
[ "Return", "node", "s", "representation", "as", "generator", "expression", "." ]
python
train
64.75
NaturalHistoryMuseum/pyzbar
pyzbar/wrapper.py
https://github.com/NaturalHistoryMuseum/pyzbar/blob/833b375c0e84077943b7100cc9dc22a7bd48754b/pyzbar/wrapper.py#L123-L136
def zbar_function(fname, restype, *args): """Returns a foreign function exported by `zbar`. Args: fname (:obj:`str`): Name of the exported function as string. restype (:obj:): Return type - one of the `ctypes` primitive C data types. *args: Arguments - a sequence of `ctypes` primitive C data types. Returns: cddl.CFunctionType: A wrapper around the function. """ prototype = CFUNCTYPE(restype, *args) return prototype((fname, load_libzbar()))
[ "def", "zbar_function", "(", "fname", ",", "restype", ",", "*", "args", ")", ":", "prototype", "=", "CFUNCTYPE", "(", "restype", ",", "*", "args", ")", "return", "prototype", "(", "(", "fname", ",", "load_libzbar", "(", ")", ")", ")" ]
Returns a foreign function exported by `zbar`. Args: fname (:obj:`str`): Name of the exported function as string. restype (:obj:): Return type - one of the `ctypes` primitive C data types. *args: Arguments - a sequence of `ctypes` primitive C data types. Returns: cddl.CFunctionType: A wrapper around the function.
[ "Returns", "a", "foreign", "function", "exported", "by", "zbar", "." ]
python
train
35.428571
Erotemic/utool
utool/util_numpy.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_numpy.py#L270-L294
def deterministic_shuffle(list_, seed=0, rng=None): r""" Args: list_ (list): seed (int): Returns: list: list_ CommandLine: python -m utool.util_numpy --test-deterministic_shuffle Example: >>> # ENABLE_DOCTEST >>> from utool.util_numpy import * # NOQA >>> list_ = [1, 2, 3, 4, 5, 6] >>> seed = 1 >>> list_ = deterministic_shuffle(list_, seed) >>> result = str(list_) >>> print(result) [3, 2, 5, 1, 4, 6] """ rng = ensure_rng(seed if rng is None else rng) rng.shuffle(list_) return list_
[ "def", "deterministic_shuffle", "(", "list_", ",", "seed", "=", "0", ",", "rng", "=", "None", ")", ":", "rng", "=", "ensure_rng", "(", "seed", "if", "rng", "is", "None", "else", "rng", ")", "rng", ".", "shuffle", "(", "list_", ")", "return", "list_" ...
r""" Args: list_ (list): seed (int): Returns: list: list_ CommandLine: python -m utool.util_numpy --test-deterministic_shuffle Example: >>> # ENABLE_DOCTEST >>> from utool.util_numpy import * # NOQA >>> list_ = [1, 2, 3, 4, 5, 6] >>> seed = 1 >>> list_ = deterministic_shuffle(list_, seed) >>> result = str(list_) >>> print(result) [3, 2, 5, 1, 4, 6]
[ "r", "Args", ":", "list_", "(", "list", ")", ":", "seed", "(", "int", ")", ":" ]
python
train
23.88
ojake/django-tracked-model
tracked_model/control.py
https://github.com/ojake/django-tracked-model/blob/19bc48874dd2e5fb5defedc6b8c5c3915cce1424/tracked_model/control.py#L7-L21
def create_track_token(request): """Returns ``TrackToken``. ``TrackToken' contains request and user making changes. It can be passed to ``TrackedModel.save`` instead of ``request``. It is intended to be used when passing ``request`` is not possible e.g. when ``TrackedModel.save`` will be called from celery task. """ from tracked_model.models import RequestInfo request_pk = RequestInfo.create_or_get_from_request(request).pk user_pk = None if request.user.is_authenticated(): user_pk = request.user.pk return TrackToken(request_pk=request_pk, user_pk=user_pk)
[ "def", "create_track_token", "(", "request", ")", ":", "from", "tracked_model", ".", "models", "import", "RequestInfo", "request_pk", "=", "RequestInfo", ".", "create_or_get_from_request", "(", "request", ")", ".", "pk", "user_pk", "=", "None", "if", "request", ...
Returns ``TrackToken``. ``TrackToken' contains request and user making changes. It can be passed to ``TrackedModel.save`` instead of ``request``. It is intended to be used when passing ``request`` is not possible e.g. when ``TrackedModel.save`` will be called from celery task.
[ "Returns", "TrackToken", ".", "TrackToken", "contains", "request", "and", "user", "making", "changes", "." ]
python
train
40.066667
jamieleshaw/lurklib
lurklib/connection.py
https://github.com/jamieleshaw/lurklib/blob/a861f35d880140422103dd78ec3239814e85fd7e/lurklib/connection.py#L319-L330
def quit(self, reason=''): """ Sends a QUIT message, closes the connection and - ends Lurklib's main loop. Optional arguments: * reason='' - Reason for quitting. """ with self.lock: self.keep_going = False self._quit(reason) self._socket.shutdown(self._m_socket.SHUT_RDWR) self._socket.close()
[ "def", "quit", "(", "self", ",", "reason", "=", "''", ")", ":", "with", "self", ".", "lock", ":", "self", ".", "keep_going", "=", "False", "self", ".", "_quit", "(", "reason", ")", "self", ".", "_socket", ".", "shutdown", "(", "self", ".", "_m_sock...
Sends a QUIT message, closes the connection and - ends Lurklib's main loop. Optional arguments: * reason='' - Reason for quitting.
[ "Sends", "a", "QUIT", "message", "closes", "the", "connection", "and", "-", "ends", "Lurklib", "s", "main", "loop", ".", "Optional", "arguments", ":", "*", "reason", "=", "-", "Reason", "for", "quitting", "." ]
python
train
32.5
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/core_v1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/core_v1_api.py#L6747-L6771
def create_namespaced_resource_quota(self, namespace, body, **kwargs): # noqa: E501 """create_namespaced_resource_quota # noqa: E501 create a ResourceQuota # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_resource_quota(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1ResourceQuota body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1ResourceQuota If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_resource_quota_with_http_info(namespace, body, **kwargs) # noqa: E501 else: (data) = self.create_namespaced_resource_quota_with_http_info(namespace, body, **kwargs) # noqa: E501 return data
[ "def", "create_namespaced_resource_quota", "(", "self", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "...
create_namespaced_resource_quota # noqa: E501 create a ResourceQuota # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_resource_quota(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1ResourceQuota body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1ResourceQuota If the method is called asynchronously, returns the request thread.
[ "create_namespaced_resource_quota", "#", "noqa", ":", "E501" ]
python
train
62.68
inonit/drf-haystack
drf_haystack/filters.py
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/filters.py#L67-L73
def get_query_builder(self, *args, **kwargs): """ Return the query builder class instance that should be used to build the query which is passed to the search engine backend. """ query_builder = self.get_query_builder_class() return query_builder(*args, **kwargs)
[ "def", "get_query_builder", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "query_builder", "=", "self", ".", "get_query_builder_class", "(", ")", "return", "query_builder", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Return the query builder class instance that should be used to build the query which is passed to the search engine backend.
[ "Return", "the", "query", "builder", "class", "instance", "that", "should", "be", "used", "to", "build", "the", "query", "which", "is", "passed", "to", "the", "search", "engine", "backend", "." ]
python
train
43.571429
saltstack/salt
salt/returners/splunk.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/splunk.py#L70-L104
def _send_splunk(event, index_override=None, sourcetype_override=None): ''' Send the results to Splunk. Requires the Splunk HTTP Event Collector running on port 8088. This is available on Splunk Enterprise version 6.3 or higher. ''' # Get Splunk Options opts = _get_options() log.info(str('Options: %s'), # future lint: disable=blacklisted-function salt.utils.json.dumps(opts)) http_event_collector_key = opts['token'] http_event_collector_host = opts['indexer'] # Set up the collector splunk_event = http_event_collector(http_event_collector_key, http_event_collector_host) # init the payload payload = {} # Set up the event metadata if index_override is None: payload.update({"index": opts['index']}) else: payload.update({"index": index_override}) if sourcetype_override is None: payload.update({"sourcetype": opts['sourcetype']}) else: payload.update({"index": sourcetype_override}) # Add the event payload.update({"event": event}) log.info(str('Payload: %s'), # future lint: disable=blacklisted-function salt.utils.json.dumps(payload)) # Fire it off splunk_event.sendEvent(payload) return True
[ "def", "_send_splunk", "(", "event", ",", "index_override", "=", "None", ",", "sourcetype_override", "=", "None", ")", ":", "# Get Splunk Options", "opts", "=", "_get_options", "(", ")", "log", ".", "info", "(", "str", "(", "'Options: %s'", ")", ",", "# futu...
Send the results to Splunk. Requires the Splunk HTTP Event Collector running on port 8088. This is available on Splunk Enterprise version 6.3 or higher.
[ "Send", "the", "results", "to", "Splunk", ".", "Requires", "the", "Splunk", "HTTP", "Event", "Collector", "running", "on", "port", "8088", ".", "This", "is", "available", "on", "Splunk", "Enterprise", "version", "6", ".", "3", "or", "higher", "." ]
python
train
35.057143
Alignak-monitoring/alignak
alignak/objects/satellitelink.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L979-L991
def get_actions(self, params): """Send a HTTP request to the satellite (GET /_checks) Get actions from the scheduler. Un-serialize data received. :param params: the request parameters :type params: str :return: Actions list on success, [] on failure :rtype: list """ res = self.con.get('_checks', params, wait=True) logger.debug("Got checks to execute from %s: %s", self.name, res) return unserialize(res, True)
[ "def", "get_actions", "(", "self", ",", "params", ")", ":", "res", "=", "self", ".", "con", ".", "get", "(", "'_checks'", ",", "params", ",", "wait", "=", "True", ")", "logger", ".", "debug", "(", "\"Got checks to execute from %s: %s\"", ",", "self", "."...
Send a HTTP request to the satellite (GET /_checks) Get actions from the scheduler. Un-serialize data received. :param params: the request parameters :type params: str :return: Actions list on success, [] on failure :rtype: list
[ "Send", "a", "HTTP", "request", "to", "the", "satellite", "(", "GET", "/", "_checks", ")", "Get", "actions", "from", "the", "scheduler", ".", "Un", "-", "serialize", "data", "received", "." ]
python
train
37.538462
emc-openstack/storops
storops/vnx/resource/system.py
https://github.com/emc-openstack/storops/blob/24b4b13bf065c0ef0538dd0b5ebb8f25d24176bd/storops/vnx/resource/system.py#L518-L531
def get_rsc_list_2(self, rsc_clz_list=None): """get the list of resource list to collect based on clz list :param rsc_clz_list: the list of classes to collect :return: filtered list of resource list, like [VNXLunList(), VNXDiskList()] """ rsc_list_2 = self._default_rsc_list_with_perf_stats() if rsc_clz_list is None: rsc_clz_list = ResourceList.get_rsc_clz_list(rsc_list_2) return [rsc_list for rsc_list in rsc_list_2 if rsc_list.get_resource_class() in rsc_clz_list]
[ "def", "get_rsc_list_2", "(", "self", ",", "rsc_clz_list", "=", "None", ")", ":", "rsc_list_2", "=", "self", ".", "_default_rsc_list_with_perf_stats", "(", ")", "if", "rsc_clz_list", "is", "None", ":", "rsc_clz_list", "=", "ResourceList", ".", "get_rsc_clz_list", ...
get the list of resource list to collect based on clz list :param rsc_clz_list: the list of classes to collect :return: filtered list of resource list, like [VNXLunList(), VNXDiskList()]
[ "get", "the", "list", "of", "resource", "list", "to", "collect", "based", "on", "clz", "list" ]
python
train
41
RudolfCardinal/pythonlib
cardinal_pythonlib/hash.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/hash.py#L188-L196
def hash(self, raw: Any) -> str: """ Returns the hex digest of a HMAC-encoded version of the input. """ with MultiTimerContext(timer, TIMING_HASH): raw_bytes = str(raw).encode('utf-8') hmac_obj = hmac.new(key=self.key_bytes, msg=raw_bytes, digestmod=self.digestmod) return hmac_obj.hexdigest()
[ "def", "hash", "(", "self", ",", "raw", ":", "Any", ")", "->", "str", ":", "with", "MultiTimerContext", "(", "timer", ",", "TIMING_HASH", ")", ":", "raw_bytes", "=", "str", "(", "raw", ")", ".", "encode", "(", "'utf-8'", ")", "hmac_obj", "=", "hmac",...
Returns the hex digest of a HMAC-encoded version of the input.
[ "Returns", "the", "hex", "digest", "of", "a", "HMAC", "-", "encoded", "version", "of", "the", "input", "." ]
python
train
42.777778
iotile/coretools
iotilecore/iotile/core/hw/auth/env_auth_provider.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/auth/env_auth_provider.py#L102-L145
def verify_report(self, device_id, root, data, signature, **kwargs): """Verify a buffer of report data on behalf of a device. Args: device_id (int): The id of the device that we should encrypt for root (int): The root key type that should be used to generate the report data (bytearray): The data that we should verify signature (bytearray): The signature attached to data that we should verify **kwargs: There are additional specific keyword args that are required depending on the root key used. Typically, you must specify - report_id (int): The report id - sent_timestamp (int): The sent timestamp of the report These two bits of information are used to construct the per report signing and encryption key from the specific root key type. Returns: dict: The result of the verification process must always be a bool under the 'verified' key, however additional keys may be present depending on the signature method used. Raises: NotFoundError: If the auth provider is not able to verify the data due to an error. If the data is simply not valid, then the function returns normally. """ report_key = self._verify_derive_key(device_id, root, **kwargs) message_hash = hashlib.sha256(data).digest() hmac_calc = hmac.new(report_key, message_hash, hashlib.sha256) result = bytearray(hmac_calc.digest()) if len(signature) == 0: verified = False elif len(signature) > len(result): verified = False elif len(signature) < len(result): trunc_result = result[:len(signature)] verified = hmac.compare_digest(signature, trunc_result) else: verified = hmac.compare_digest(signature, result) return {'verified': verified, 'bit_length': 8*len(signature)}
[ "def", "verify_report", "(", "self", ",", "device_id", ",", "root", ",", "data", ",", "signature", ",", "*", "*", "kwargs", ")", ":", "report_key", "=", "self", ".", "_verify_derive_key", "(", "device_id", ",", "root", ",", "*", "*", "kwargs", ")", "me...
Verify a buffer of report data on behalf of a device. Args: device_id (int): The id of the device that we should encrypt for root (int): The root key type that should be used to generate the report data (bytearray): The data that we should verify signature (bytearray): The signature attached to data that we should verify **kwargs: There are additional specific keyword args that are required depending on the root key used. Typically, you must specify - report_id (int): The report id - sent_timestamp (int): The sent timestamp of the report These two bits of information are used to construct the per report signing and encryption key from the specific root key type. Returns: dict: The result of the verification process must always be a bool under the 'verified' key, however additional keys may be present depending on the signature method used. Raises: NotFoundError: If the auth provider is not able to verify the data due to an error. If the data is simply not valid, then the function returns normally.
[ "Verify", "a", "buffer", "of", "report", "data", "on", "behalf", "of", "a", "device", "." ]
python
train
45.613636
inasafe/inasafe
safe/plugin.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/plugin.py#L428-L444
def _create_add_petabencana_layer_action(self): """Create action for import OSM Dialog.""" icon = resources_path('img', 'icons', 'add-petabencana-layer.svg') self.action_add_petabencana_layer = QAction( QIcon(icon), self.tr('Add PetaBencana Flood Layer'), self.iface.mainWindow()) self.action_add_petabencana_layer.setStatusTip(self.tr( 'Add PetaBencana Flood Layer')) self.action_add_petabencana_layer.setWhatsThis(self.tr( 'Use this to add a PetaBencana layer to your map. ' 'It needs internet access to function.')) self.action_add_petabencana_layer.triggered.connect( self.add_petabencana_layer) self.add_action( self.action_add_petabencana_layer, add_to_toolbar=self.full_toolbar)
[ "def", "_create_add_petabencana_layer_action", "(", "self", ")", ":", "icon", "=", "resources_path", "(", "'img'", ",", "'icons'", ",", "'add-petabencana-layer.svg'", ")", "self", ".", "action_add_petabencana_layer", "=", "QAction", "(", "QIcon", "(", "icon", ")", ...
Create action for import OSM Dialog.
[ "Create", "action", "for", "import", "OSM", "Dialog", "." ]
python
train
49
mikeboers/MultiMap
multimap.py
https://github.com/mikeboers/MultiMap/blob/0251e5d5df693cc247b4ac5b95adfdd10e3bec04/multimap.py#L206-L216
def iteritems(self): """Iterator across all the non-duplicate keys and their values. Only yields the first key of duplicates. """ keys_yielded = set() for k, v in self._pairs: if k not in keys_yielded: keys_yielded.add(k) yield k, v
[ "def", "iteritems", "(", "self", ")", ":", "keys_yielded", "=", "set", "(", ")", "for", "k", ",", "v", "in", "self", ".", "_pairs", ":", "if", "k", "not", "in", "keys_yielded", ":", "keys_yielded", ".", "add", "(", "k", ")", "yield", "k", ",", "v...
Iterator across all the non-duplicate keys and their values. Only yields the first key of duplicates.
[ "Iterator", "across", "all", "the", "non", "-", "duplicate", "keys", "and", "their", "values", ".", "Only", "yields", "the", "first", "key", "of", "duplicates", "." ]
python
train
29.454545
portantier/habu
habu/cli/cmd_shodan.py
https://github.com/portantier/habu/blob/87091e389dc6332fe1b82830c22b2eefc55816f2/habu/cli/cmd_shodan.py#L20-L66
def cmd_shodan(ip, no_cache, verbose, output): """Simple shodan API client. Prints the JSON result of a shodan query. Example: \b $ habu.shodan 8.8.8.8 { "hostnames": [ "google-public-dns-a.google.com" ], "country_code": "US", "org": "Google", "data": [ { "isp": "Google", "transport": "udp", "data": "Recursion: enabled", "asn": "AS15169", "port": 53, "hostnames": [ "google-public-dns-a.google.com" ] } ], "ports": [ 53 ] } """ habucfg = loadcfg() if 'SHODAN_APIKEY' not in habucfg: print('You must provide a shodan apikey. Use the ~/.habu.json file (variable SHODAN_APIKEY), or export the variable HABU_SHODAN_APIKEY') print('Get your API key from https://www.shodan.io/') sys.exit(1) if verbose: logging.basicConfig(level=logging.INFO, format='%(message)s') data = shodan_get_result(ip, habucfg['SHODAN_APIKEY'], no_cache, verbose) output.write(json.dumps(data, indent=4)) output.write('\n')
[ "def", "cmd_shodan", "(", "ip", ",", "no_cache", ",", "verbose", ",", "output", ")", ":", "habucfg", "=", "loadcfg", "(", ")", "if", "'SHODAN_APIKEY'", "not", "in", "habucfg", ":", "print", "(", "'You must provide a shodan apikey. Use the ~/.habu.json file (variable...
Simple shodan API client. Prints the JSON result of a shodan query. Example: \b $ habu.shodan 8.8.8.8 { "hostnames": [ "google-public-dns-a.google.com" ], "country_code": "US", "org": "Google", "data": [ { "isp": "Google", "transport": "udp", "data": "Recursion: enabled", "asn": "AS15169", "port": 53, "hostnames": [ "google-public-dns-a.google.com" ] } ], "ports": [ 53 ] }
[ "Simple", "shodan", "API", "client", "." ]
python
train
25.297872
bitesofcode/projex
projex/enum.py
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/enum.py#L115-L124
def all(self): """ Returns all the values joined together. :return <int> """ out = 0 for key, value in self.items(): out |= value return out
[ "def", "all", "(", "self", ")", ":", "out", "=", "0", "for", "key", ",", "value", "in", "self", ".", "items", "(", ")", ":", "out", "|=", "value", "return", "out" ]
Returns all the values joined together. :return <int>
[ "Returns", "all", "the", "values", "joined", "together", ".", ":", "return", "<int", ">" ]
python
train
21.2