text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def attr(prev, attr_name):
    """Pipe stage that extracts a named attribute from each incoming object.

    Objects that lack the attribute are silently skipped.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param attr_name: The name of attribute
    :type attr_name: str
    :returns: generator
    """
    # Sentinel distinguishes "attribute absent" from any real attribute value.
    missing = object()
    for item in prev:
        value = getattr(item, attr_name, missing)
        if value is not missing:
            yield value
[ "def", "attr", "(", "prev", ",", "attr_name", ")", ":", "for", "obj", "in", "prev", ":", "if", "hasattr", "(", "obj", ",", "attr_name", ")", ":", "yield", "getattr", "(", "obj", ",", "attr_name", ")" ]
28.416667
11.916667
def updateTable(self, networkId, tableType, body, class_, verbose=None):
    """Updates the table specified by the `tableType` and `networkId` parameters.

    New columns will be created if they do not exist in the target table.

    Current limitations:
    * Numbers are handled as Double
    * List column is not supported in this version

    :param networkId: SUID containing the table
    :param tableType: Type of table
    :param body: The data with which to update the table.
    :param class_: None -- Not required, can be None (unused here; kept only
        for signature compatibility with the generated API)
    :param verbose: print more
    :returns: default: successful operation
    """
    # NOTE(review): removed a dead trailing `+ ''` from the URL build; the
    # resulting URL string is byte-identical to before.
    # `self.___url` (triple underscore) is the convention used by this
    # generated client -- presumably set in __init__; confirm.
    url = self.___url + 'networks/' + str(networkId) + '/tables/' + str(tableType)
    response = api(url=url, method="PUT", body=body, verbose=verbose)
    return response
[ "def", "updateTable", "(", "self", ",", "networkId", ",", "tableType", ",", "body", ",", "class_", ",", "verbose", "=", "None", ")", ":", "response", "=", "api", "(", "url", "=", "self", ".", "___url", "+", "'networks/'", "+", "str", "(", "networkId", ")", "+", "'/tables/'", "+", "str", "(", "tableType", ")", "+", "''", ",", "method", "=", "\"PUT\"", ",", "body", "=", "body", ",", "verbose", "=", "verbose", ")", "return", "response" ]
43.263158
26.210526
def get_public_network_ip(ips, public_subnet):
    """
    Given a public subnet, chose the one IP from the remote host that exists
    within the subnet range.
    """
    candidates = (ip for ip in ips if net.ip_in_subnet(ip, public_subnet))
    found = next(candidates, None)
    if found is not None:
        return found
    msg = "IPs (%s) are not valid for any of subnet specified %s" % (
        str(ips), str(public_subnet))
    raise RuntimeError(msg)
[ "def", "get_public_network_ip", "(", "ips", ",", "public_subnet", ")", ":", "for", "ip", "in", "ips", ":", "if", "net", ".", "ip_in_subnet", "(", "ip", ",", "public_subnet", ")", ":", "return", "ip", "msg", "=", "\"IPs (%s) are not valid for any of subnet specified %s\"", "%", "(", "str", "(", "ips", ")", ",", "str", "(", "public_subnet", ")", ")", "raise", "RuntimeError", "(", "msg", ")" ]
37.5
17.3
def _parse_mibs(iLOIP, snmp_credentials):
    """Parses the MIBs.

    :param iLOIP: IP address of the server on which SNMP discovery
        has to be executed.
    :param snmp_credentials: a Dictionary of SNMP credentials.
        auth_user: SNMP user
        auth_protocol: Auth Protocol
        auth_prot_pp: Pass phrase value for AuthProtocol.
        priv_protocol: Privacy Protocol.
        auth_priv_pp: Pass phrase value for Privacy Protocol.
    :returns: the dictionary of parsed MIBs.
    :raises exception.IloSNMPInvalidInputFailure: if pysnmp is unable to get
        SNMP data due to wrong inputs provided.
    :raises exception.IloSNMPExceptionFailure: if pysnmp raises any exception.
    """
    result = {}
    usm_user_obj = _create_usm_user_obj(snmp_credentials)
    try:
        for (errorIndication, errorStatus, errorIndex,
             varBinds) in hlapi.nextCmd(
                hlapi.SnmpEngine(),
                usm_user_obj,
                hlapi.UdpTransportTarget((iLOIP, 161), timeout=3, retries=3),
                hlapi.ContextData(),
                # cpqida cpqDaPhyDrvTable Drive Array Physical Drive Table
                hlapi.ObjectType(
                    hlapi.ObjectIdentity('1.3.6.1.4.1.232.3.2.5.1')),
                # cpqscsi SCSI Physical Drive Table
                hlapi.ObjectType(
                    hlapi.ObjectIdentity('1.3.6.1.4.1.232.5.2.4.1')),
                # cpqscsi SAS Physical Drive Table
                hlapi.ObjectType(
                    hlapi.ObjectIdentity('1.3.6.1.4.1.232.5.5.2.1')),
                lexicographicMode=False,
                ignoreNonIncreasingOid=True):
            if errorIndication:
                LOG.error(errorIndication)
                # BUG FIX: the original `msg = "... %s", errorIndication`
                # built a tuple instead of a formatted message; use `%`.
                msg = "SNMP failed to traverse MIBs %s" % errorIndication
                raise exception.IloSNMPInvalidInputFailure(msg)
            else:
                if errorStatus:
                    msg = ('Parsing MIBs failed. %s at %s' % (
                        errorStatus.prettyPrint(),
                        errorIndex and varBinds[-1][int(errorIndex)-1] or '?'
                        )
                    )
                    LOG.error(msg)
                    # NOTE(review): this raise is still inside the outer
                    # `try`, so it is re-wrapped below as
                    # IloSNMPExceptionFailure -- matches existing behavior.
                    raise exception.IloSNMPInvalidInputFailure(msg)
                else:
                    for varBindTableRow in varBinds:
                        name, val = tuple(varBindTableRow)
                        # NOTE(review): mibViewController is not defined in
                        # this function; presumably a module-level pysnmp
                        # view controller -- confirm.
                        oid, label, suffix = (
                            mibViewController.getNodeName(name))
                        key = name.prettyPrint()
                        # Don't traverse outside the tables we requested
                        if not (key.find("SNMPv2-SMI::enterprises.232.3") >= 0
                                or (key.find(
                                    "SNMPv2-SMI::enterprises.232.5") >= 0)):
                            break
                        if key not in result:
                            result[key] = {}
                            result[key][label[-1]] = {}
                        result[key][label[-1]][suffix] = val
    except Exception as e:
        # BUG FIX: format the message instead of building a tuple.
        msg = "SNMP library failed with error %s" % e
        LOG.error(msg)
        raise exception.IloSNMPExceptionFailure(msg)
    return result
[ "def", "_parse_mibs", "(", "iLOIP", ",", "snmp_credentials", ")", ":", "result", "=", "{", "}", "usm_user_obj", "=", "_create_usm_user_obj", "(", "snmp_credentials", ")", "try", ":", "for", "(", "errorIndication", ",", "errorStatus", ",", "errorIndex", ",", "varBinds", ")", "in", "hlapi", ".", "nextCmd", "(", "hlapi", ".", "SnmpEngine", "(", ")", ",", "usm_user_obj", ",", "hlapi", ".", "UdpTransportTarget", "(", "(", "iLOIP", ",", "161", ")", ",", "timeout", "=", "3", ",", "retries", "=", "3", ")", ",", "hlapi", ".", "ContextData", "(", ")", ",", "# cpqida cpqDaPhyDrvTable Drive Array Physical Drive Table", "hlapi", ".", "ObjectType", "(", "hlapi", ".", "ObjectIdentity", "(", "'1.3.6.1.4.1.232.3.2.5.1'", ")", ")", ",", "# cpqscsi SCSI Physical Drive Table", "hlapi", ".", "ObjectType", "(", "hlapi", ".", "ObjectIdentity", "(", "'1.3.6.1.4.1.232.5.2.4.1'", ")", ")", ",", "# cpqscsi SAS Physical Drive Table", "hlapi", ".", "ObjectType", "(", "hlapi", ".", "ObjectIdentity", "(", "'1.3.6.1.4.1.232.5.5.2.1'", ")", ")", ",", "lexicographicMode", "=", "False", ",", "ignoreNonIncreasingOid", "=", "True", ")", ":", "if", "errorIndication", ":", "LOG", ".", "error", "(", "errorIndication", ")", "msg", "=", "\"SNMP failed to traverse MIBs %s\"", ",", "errorIndication", "raise", "exception", ".", "IloSNMPInvalidInputFailure", "(", "msg", ")", "else", ":", "if", "errorStatus", ":", "msg", "=", "(", "'Parsing MIBs failed. 
%s at %s'", "%", "(", "errorStatus", ".", "prettyPrint", "(", ")", ",", "errorIndex", "and", "varBinds", "[", "-", "1", "]", "[", "int", "(", "errorIndex", ")", "-", "1", "]", "or", "'?'", ")", ")", "LOG", ".", "error", "(", "msg", ")", "raise", "exception", ".", "IloSNMPInvalidInputFailure", "(", "msg", ")", "else", ":", "for", "varBindTableRow", "in", "varBinds", ":", "name", ",", "val", "=", "tuple", "(", "varBindTableRow", ")", "oid", ",", "label", ",", "suffix", "=", "(", "mibViewController", ".", "getNodeName", "(", "name", ")", ")", "key", "=", "name", ".", "prettyPrint", "(", ")", "# Don't traverse outside the tables we requested", "if", "not", "(", "key", ".", "find", "(", "\"SNMPv2-SMI::enterprises.232.3\"", ")", ">=", "0", "or", "(", "key", ".", "find", "(", "\"SNMPv2-SMI::enterprises.232.5\"", ")", ">=", "0", ")", ")", ":", "break", "if", "key", "not", "in", "result", ":", "result", "[", "key", "]", "=", "{", "}", "result", "[", "key", "]", "[", "label", "[", "-", "1", "]", "]", "=", "{", "}", "result", "[", "key", "]", "[", "label", "[", "-", "1", "]", "]", "[", "suffix", "]", "=", "val", "except", "Exception", "as", "e", ":", "msg", "=", "\"SNMP library failed with error %s\"", ",", "e", "LOG", ".", "error", "(", "msg", ")", "raise", "exception", ".", "IloSNMPExceptionFailure", "(", "msg", ")", "return", "result" ]
44
15.890411
def set_journal_comment(self, comment=None):
    """Sets a comment.

    arg:    comment (string): the new comment
    raise:  InvalidArgument - comment is invalid
    raise:  NoAccess - metadata.is_readonly() is true
    raise:  NullArgument - comment is null
    compliance: mandatory - This method must be implemented.
    """
    # Guard clauses: reject null input and read-only metadata up front.
    if comment is None:
        raise NullArgument()
    metadata = Metadata(**settings.METADATA['comment'])
    if metadata.is_read_only():
        raise NoAccess()
    if not self._is_valid_input(comment, metadata, array=False):
        raise InvalidArgument()
    self._my_map['journalComment']['text'] = comment
[ "def", "set_journal_comment", "(", "self", ",", "comment", "=", "None", ")", ":", "if", "comment", "is", "None", ":", "raise", "NullArgument", "(", ")", "metadata", "=", "Metadata", "(", "*", "*", "settings", ".", "METADATA", "[", "'comment'", "]", ")", "if", "metadata", ".", "is_read_only", "(", ")", ":", "raise", "NoAccess", "(", ")", "if", "self", ".", "_is_valid_input", "(", "comment", ",", "metadata", ",", "array", "=", "False", ")", ":", "self", ".", "_my_map", "[", "'journalComment'", "]", "[", "'text'", "]", "=", "comment", "else", ":", "raise", "InvalidArgument", "(", ")" ]
36.947368
15
def setLogEntries(self, entries, save=True, update_policies=True):
    """Sets list of log entries. If save=True, saves the log.
    If update_policies=True, also updates default policies based on these entries"""
    uplink = os.path.join("..", Purr.RenderIndex.INDEX)  # "Up" link target
    previous = None  # last non-ignored entry, becomes the next entry's "Prev" link
    self.entries = []
    for entry in entries:
        if entry.ignore:
            continue
        entry.setPrevUpNextLinks(prev=previous, up=uplink)
        previous = entry
        self.entries.append(entry)
    if save:
        self.save()
    if update_policies:
        # rebuild default policies and renames from the full entry list
        self._default_dp_props = {}
        for entry in entries:
            self.updatePoliciesFromEntry(entry)
        dprint(4, "default policies:", self._default_dp_props)
[ "def", "setLogEntries", "(", "self", ",", "entries", ",", "save", "=", "True", ",", "update_policies", "=", "True", ")", ":", "prev", "=", "None", "# \"previous\" valid entry for \"Prev\" link", "uplink", "=", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "Purr", ".", "RenderIndex", ".", "INDEX", ")", "# \"Up\" link", "self", ".", "entries", "=", "[", "]", "for", "entry", "in", "entries", ":", "if", "not", "entry", ".", "ignore", ":", "entry", ".", "setPrevUpNextLinks", "(", "prev", "=", "prev", ",", "up", "=", "uplink", ")", "prev", "=", "entry", "self", ".", "entries", ".", "append", "(", "entry", ")", "if", "save", ":", "self", ".", "save", "(", ")", "if", "update_policies", ":", "# populate default policies and renames based on entry list", "self", ".", "_default_dp_props", "=", "{", "}", "for", "entry", "in", "entries", ":", "self", ".", "updatePoliciesFromEntry", "(", "entry", ")", "dprint", "(", "4", ",", "\"default policies:\"", ",", "self", ".", "_default_dp_props", ")" ]
47.263158
14.789474
def clone(self):
    """Create a complete copy of the package.

    A shallow copy of the object is taken first, then the compound-mass
    container is deep-copied so the clone owns independent mass data.

    :returns: A new MaterialPackage object."""
    duplicate = copy.copy(self)
    duplicate._compound_masses = copy.deepcopy(self._compound_masses)
    return duplicate
[ "def", "clone", "(", "self", ")", ":", "result", "=", "copy", ".", "copy", "(", "self", ")", "result", ".", "_compound_masses", "=", "copy", ".", "deepcopy", "(", "self", ".", "_compound_masses", ")", "return", "result" ]
29.75
20.125
def contextMenuEvent(self, event):
    """
    Add menu action:

    * 'Show line numbers'
    * 'Save to file'
    """
    menu = QtWidgets.QTextEdit.createStandardContextMenu(self)
    # Build a "max. lines" spin box wired to the document's block limit:
    container = QtWidgets.QWidget()
    layout = QtWidgets.QHBoxLayout()
    container.setLayout(layout)
    spinbox = QtWidgets.QSpinBox()
    spinbox.setRange(1, 1e6)
    spinbox.setValue(self.MAXLINES)
    spinbox.valueChanged.connect(self.document().setMaximumBlockCount)
    layout.addWidget(QtWidgets.QLabel('Max. lines'))
    layout.addWidget(spinbox)
    # Wrap the widget in an action so it can be embedded in the menu:
    widget_action = QtWidgets.QWidgetAction(self)
    widget_action.setDefaultWidget(container)
    menu.addAction(widget_action)
    menu.exec_(event.globalPos())
[ "def", "contextMenuEvent", "(", "self", ",", "event", ")", ":", "menu", "=", "QtWidgets", ".", "QTextEdit", ".", "createStandardContextMenu", "(", "self", ")", "# create max.lines spin box:", "w", "=", "QtWidgets", ".", "QWidget", "(", ")", "l", "=", "QtWidgets", ".", "QHBoxLayout", "(", ")", "w", ".", "setLayout", "(", "l", ")", "e", "=", "QtWidgets", ".", "QSpinBox", "(", ")", "e", ".", "setRange", "(", "1", ",", "1e6", ")", "e", ".", "setValue", "(", "self", ".", "MAXLINES", ")", "e", ".", "valueChanged", ".", "connect", "(", "self", ".", "document", "(", ")", ".", "setMaximumBlockCount", ")", "l", ".", "addWidget", "(", "QtWidgets", ".", "QLabel", "(", "'Max. lines'", ")", ")", "l", ".", "addWidget", "(", "e", ")", "# add spinbox to menu:", "a", "=", "QtWidgets", ".", "QWidgetAction", "(", "self", ")", "a", ".", "setDefaultWidget", "(", "w", ")", "menu", ".", "addAction", "(", "a", ")", "menu", ".", "exec_", "(", "event", ".", "globalPos", "(", ")", ")" ]
28.68
14.28
def filter_spouts(table, header):
    """Filter a topology component table down to its spout rows.

    :param table: iterable of rows; each row's first column is the
        component type (e.g. 'spout' or 'bolt')
    :param header: table header, passed through unchanged
    :returns: tuple of (spout rows, header)
    """
    # Comprehension replaces the manual append loop (same order, same rows).
    spouts_info = [row for row in table if row[0] == 'spout']
    return spouts_info, header
[ "def", "filter_spouts", "(", "table", ",", "header", ")", ":", "spouts_info", "=", "[", "]", "for", "row", "in", "table", ":", "if", "row", "[", "0", "]", "==", "'spout'", ":", "spouts_info", ".", "append", "(", "row", ")", "return", "spouts_info", ",", "header" ]
26.142857
12.571429
def targets_for_class(self, target, classname):
    """Search which targets from `target`'s transitive dependencies contain `classname`."""
    matches = set()
    # Iterate the transitive closure; the loop variable is renamed so it no
    # longer shadows the `target` parameter.
    for candidate in target.closure():
        # A target matches as soon as any of its classes mentions the name.
        if any(classname in one_class
               for one_class in self._target_classes(candidate)):
            matches.add(candidate)
    return matches
[ "def", "targets_for_class", "(", "self", ",", "target", ",", "classname", ")", ":", "targets_with_class", "=", "set", "(", ")", "for", "target", "in", "target", ".", "closure", "(", ")", ":", "for", "one_class", "in", "self", ".", "_target_classes", "(", "target", ")", ":", "if", "classname", "in", "one_class", ":", "targets_with_class", ".", "add", "(", "target", ")", "break", "return", "targets_with_class" ]
37.3
11.6
def push(self, new_scope=None):
    """Create a new scope

    :returns: TODO
    """
    # Default to a fresh, empty scope when none is supplied.
    scope = {"types": {}, "vars": {}} if new_scope is None else new_scope
    self._curr_scope = scope
    self._dlog("pushing new scope, scope level = {}".format(self.level()))
    self._scope_stack.append(scope)
[ "def", "push", "(", "self", ",", "new_scope", "=", "None", ")", ":", "if", "new_scope", "is", "None", ":", "new_scope", "=", "{", "\"types\"", ":", "{", "}", ",", "\"vars\"", ":", "{", "}", "}", "self", ".", "_curr_scope", "=", "new_scope", "self", ".", "_dlog", "(", "\"pushing new scope, scope level = {}\"", ".", "format", "(", "self", ".", "level", "(", ")", ")", ")", "self", ".", "_scope_stack", ".", "append", "(", "self", ".", "_curr_scope", ")" ]
29.076923
15.230769
def grant_revoke(grant, database, user, host='localhost', grant_option=False, escape=True, **connection_args):
    '''
    Removes a grant from the MySQL server.

    :param grant: privilege list to revoke (e.g. ``'SELECT,INSERT,UPDATE'``)
    :param database: ``'<db>.<table>'`` target of the grant; ``'*'`` wildcards allowed
    :param user: MySQL account name the grant was issued to
    :param host: host part of the MySQL account (default ``'localhost'``)
    :param grant_option: if truthy, also revoke GRANT OPTION
    :param escape: passed through to ``grant_exists`` for identifier escaping
    :returns: ``True`` if the grant is gone afterwards, ``False`` otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' mysql.grant_revoke \
            'SELECT,INSERT,UPDATE' 'database.*' 'frank' 'localhost'
    '''
    dbc = _connect(**connection_args)
    if dbc is None:
        # Could not connect; _connect has already recorded the error.
        return False
    cur = dbc.cursor()

    grant = __grant_normalize(grant)
    if salt.utils.data.is_true(grant_option):
        grant += ', GRANT OPTION'

    # Split 'db.table' into database and table parts.
    db_part = database.rpartition('.')
    # NOTE(review): `dbc` (the connection object) is reused here to hold the
    # database-name string -- confusing but intentional in this module.
    dbc = db_part[0]
    table = db_part[2]
    if dbc != '*':
        # _ and % are authorized on GRANT queries and should get escaped
        # on the db name, but only if not requesting a table level grant
        s_database = quote_identifier(dbc, for_grants=(table == '*'))
    if dbc == '*':
        # add revoke for *.*
        # before the modification query send to mysql will looks like
        # REVOKE SELECT ON `*`.* FROM %(user)s@%(host)s
        s_database = dbc
    if table != '*':
        table = quote_identifier(table)
    # identifiers cannot be used as values, same thing for grants
    qry = 'REVOKE {0} ON {1}.{2} FROM %(user)s@%(host)s;'.format(
        grant,
        s_database,
        table
    )
    args = {}
    args['user'] = user
    args['host'] = host

    try:
        _execute(cur, qry, args)
    except MySQLdb.OperationalError as exc:
        err = 'MySQL Error {0}: {1}'.format(*exc.args)
        __context__['mysql.error'] = err
        log.error(err)
        return False

    # Verify the revoke actually took effect before reporting success.
    if not grant_exists(grant, database, user, host, grant_option, escape, **connection_args):
        log.info(
            'Grant \'%s\' on \'%s\' for user \'%s\' has been '
            'revoked',
            grant, database, user)
        return True

    log.info(
        'Grant \'%s\' on \'%s\' for user \'%s\' has NOT been '
        'revoked',
        grant, database, user)
    return False
[ "def", "grant_revoke", "(", "grant", ",", "database", ",", "user", ",", "host", "=", "'localhost'", ",", "grant_option", "=", "False", ",", "escape", "=", "True", ",", "*", "*", "connection_args", ")", ":", "dbc", "=", "_connect", "(", "*", "*", "connection_args", ")", "if", "dbc", "is", "None", ":", "return", "False", "cur", "=", "dbc", ".", "cursor", "(", ")", "grant", "=", "__grant_normalize", "(", "grant", ")", "if", "salt", ".", "utils", ".", "data", ".", "is_true", "(", "grant_option", ")", ":", "grant", "+=", "', GRANT OPTION'", "db_part", "=", "database", ".", "rpartition", "(", "'.'", ")", "dbc", "=", "db_part", "[", "0", "]", "table", "=", "db_part", "[", "2", "]", "if", "dbc", "!=", "'*'", ":", "# _ and % are authorized on GRANT queries and should get escaped", "# on the db name, but only if not requesting a table level grant", "s_database", "=", "quote_identifier", "(", "dbc", ",", "for_grants", "=", "(", "table", "==", "'*'", ")", ")", "if", "dbc", "==", "'*'", ":", "# add revoke for *.*", "# before the modification query send to mysql will looks like", "# REVOKE SELECT ON `*`.* FROM %(user)s@%(host)s", "s_database", "=", "dbc", "if", "table", "!=", "'*'", ":", "table", "=", "quote_identifier", "(", "table", ")", "# identifiers cannot be used as values, same thing for grants", "qry", "=", "'REVOKE {0} ON {1}.{2} FROM %(user)s@%(host)s;'", ".", "format", "(", "grant", ",", "s_database", ",", "table", ")", "args", "=", "{", "}", "args", "[", "'user'", "]", "=", "user", "args", "[", "'host'", "]", "=", "host", "try", ":", "_execute", "(", "cur", ",", "qry", ",", "args", ")", "except", "MySQLdb", ".", "OperationalError", "as", "exc", ":", "err", "=", "'MySQL Error {0}: {1}'", ".", "format", "(", "*", "exc", ".", "args", ")", "__context__", "[", "'mysql.error'", "]", "=", "err", "log", ".", "error", "(", "err", ")", "return", "False", "if", "not", "grant_exists", "(", "grant", ",", "database", ",", "user", ",", "host", ",", "grant_option", ",", "escape", 
",", "*", "*", "connection_args", ")", ":", "log", ".", "info", "(", "'Grant \\'%s\\' on \\'%s\\' for user \\'%s\\' has been '", "'revoked'", ",", "grant", ",", "database", ",", "user", ")", "return", "True", "log", ".", "info", "(", "'Grant \\'%s\\' on \\'%s\\' for user \\'%s\\' has NOT been '", "'revoked'", ",", "grant", ",", "database", ",", "user", ")", "return", "False" ]
28.64
19.12
def send_extpos(self, pos):
    """
    Send the current Crazyflie X, Y, Z position. This is going to be
    forwarded to the Crazyflie's position estimator.
    """
    x, y, z = pos[0], pos[1], pos[2]
    packet = CRTPPacket()
    packet.port = CRTPPort.LOCALIZATION
    packet.channel = self.POSITION_CH
    # Three little-endian floats, exactly as the firmware expects.
    packet.data = struct.pack('<fff', x, y, z)
    self._cf.send_packet(packet)
[ "def", "send_extpos", "(", "self", ",", "pos", ")", ":", "pk", "=", "CRTPPacket", "(", ")", "pk", ".", "port", "=", "CRTPPort", ".", "LOCALIZATION", "pk", ".", "channel", "=", "self", ".", "POSITION_CH", "pk", ".", "data", "=", "struct", ".", "pack", "(", "'<fff'", ",", "pos", "[", "0", "]", ",", "pos", "[", "1", "]", ",", "pos", "[", "2", "]", ")", "self", ".", "_cf", ".", "send_packet", "(", "pk", ")" ]
33.727273
13.545455
def _send_user_message(self, data):
    """
    send a message, but block if we're in key negotiation.  this is used
    for user-initiated requests.
    """
    start = time.time()
    while True:
        # Wait briefly for the clear-to-send event, then re-check state.
        self.clear_to_send.wait(0.1)
        if not self.active:
            # Connection died while we waited; drop the packet silently.
            self._log(DEBUG, 'Dropping user packet because connection is dead.')
            return
        self.clear_to_send_lock.acquire()
        if self.clear_to_send.isSet():
            # Break out while still HOLDING the lock -- it is released in
            # the `finally` below after the message is sent.
            break
        self.clear_to_send_lock.release()
        if time.time() > start + self.clear_to_send_timeout:
            raise SSHException('Key-exchange timed out waiting for key negotiation')
    try:
        self._send_message(data)
    finally:
        self.clear_to_send_lock.release()
[ "def", "_send_user_message", "(", "self", ",", "data", ")", ":", "start", "=", "time", ".", "time", "(", ")", "while", "True", ":", "self", ".", "clear_to_send", ".", "wait", "(", "0.1", ")", "if", "not", "self", ".", "active", ":", "self", ".", "_log", "(", "DEBUG", ",", "'Dropping user packet because connection is dead.'", ")", "return", "self", ".", "clear_to_send_lock", ".", "acquire", "(", ")", "if", "self", ".", "clear_to_send", ".", "isSet", "(", ")", ":", "break", "self", ".", "clear_to_send_lock", ".", "release", "(", ")", "if", "time", ".", "time", "(", ")", ">", "start", "+", "self", ".", "clear_to_send_timeout", ":", "raise", "SSHException", "(", "'Key-exchange timed out waiting for key negotiation'", ")", "try", ":", "self", ".", "_send_message", "(", "data", ")", "finally", ":", "self", ".", "clear_to_send_lock", ".", "release", "(", ")" ]
38.285714
14.857143
def parse_bangrc():
    """
    Parses ``$HOME/.bangrc`` for global settings and deployer credentials.

    The ``.bangrc`` file is expected to be a YAML file whose outermost
    structure is a key-value map.  Only the keys listed in ``RC_KEYS`` are
    kept -- any other top-level keys a user may have stored are ignored, so
    ``.bangrc`` is not a holder of default values for stack-specific
    configuration attributes.

    Returns {} if ``$HOME/.bangrc`` does not exist.

    :rtype: :class:`dict`
    """
    raw = read_raw_bangrc()
    return {key: raw[key] for key in raw if key in RC_KEYS}
[ "def", "parse_bangrc", "(", ")", ":", "raw", "=", "read_raw_bangrc", "(", ")", "return", "dict", "(", "(", "k", ",", "raw", "[", "k", "]", ")", "for", "k", "in", "raw", "if", "k", "in", "RC_KEYS", ")" ]
34.777778
25.444444
def density(self, r, rho0, gamma):
    """Compute the power-law density profile rho(r) = rho0 / r**gamma.

    (Docstring fixed: the old one documented parameters x, y, a, s that do
    not exist in the signature.)

    :param r: radius at which to evaluate the profile
    :param rho0: density normalization
    :param gamma: power-law slope
    :return: density at radius r
    """
    rho = rho0 / r ** gamma
    return rho
[ "def", "density", "(", "self", ",", "r", ",", "rho0", ",", "gamma", ")", ":", "rho", "=", "rho0", "/", "r", "**", "gamma", "return", "rho" ]
19.583333
15.583333
def fit_transform(self, raw_documents, y=None):
    """
    Learn the vocabulary dictionary and return term-document matrix.

    This is equivalent to fit followed by transform, but more
    efficiently implemented.

    Also populates ``self.n`` (document count), ``self.period_``
    (per-term document-frequency counts) and ``self.df_`` (per-term
    document-frequency fractions) as side effects.

    Parameters
    ----------
    raw_documents : iterable
        An iterable which yields either str, unicode or file objects.
        NOTE(review): must be sized and re-iterable -- ``len()`` is taken
        and it is passed to ``transform`` a second time below, so a
        one-shot iterator will not work; confirm against callers.

    Returns
    -------
    X : array, [n_samples, n_features]
        Document-term matrix.
    """
    documents = super(CountVectorizer, self).fit_transform(
        raw_documents=raw_documents, y=y)
    # Number of documents seen during fit.
    self.n = len(raw_documents)
    # Binary presence/absence matrix of terms per document.
    m = (self.transform(raw_documents) > 0).astype(int)
    # Per-term document-frequency counts (.A1 flattens the row sum to a
    # 1-D ndarray -- presumably a scipy sparse matrix; confirm).
    m = m.sum(axis=0).A1
    self.period_ = m
    # Fraction of documents containing each term.
    self.df_ = m / self.n
    return documents
[ "def", "fit_transform", "(", "self", ",", "raw_documents", ",", "y", "=", "None", ")", ":", "documents", "=", "super", "(", "CountVectorizer", ",", "self", ")", ".", "fit_transform", "(", "raw_documents", "=", "raw_documents", ",", "y", "=", "y", ")", "self", ".", "n", "=", "len", "(", "raw_documents", ")", "m", "=", "(", "self", ".", "transform", "(", "raw_documents", ")", ">", "0", ")", ".", "astype", "(", "int", ")", "m", "=", "m", ".", "sum", "(", "axis", "=", "0", ")", ".", "A1", "self", ".", "period_", "=", "m", "self", ".", "df_", "=", "m", "/", "self", ".", "n", "return", "documents" ]
34.086957
17.173913
def max_heapify(arr, end, simulation, iteration):
    """
    Max heapify helper for max_heap_sort
    """
    last_parent = (end - 1) // 2
    # Walk parents from the last one back to the root.
    for start in range(last_parent, -1, -1):
        node = start
        # Sift the value at `node` down until the heap property holds.
        while node <= last_parent:
            # Pick the larger of the two children (right child, if it
            # exists and is greater).
            child = 2 * node + 1
            if child + 1 <= end and arr[child] < arr[child + 1]:
                child += 1
            if arr[child] <= arr[node]:
                # No swap needed: subtree below is already a heap.
                break
            arr[node], arr[child] = arr[child], arr[node]
            node = child
            if simulation:
                iteration += 1
                print("iteration", iteration, ":", *arr)
    # Move the current maximum (root) to the end of the active range.
    arr[0], arr[end] = arr[end], arr[0]
    return iteration
[ "def", "max_heapify", "(", "arr", ",", "end", ",", "simulation", ",", "iteration", ")", ":", "last_parent", "=", "(", "end", "-", "1", ")", "//", "2", "# Iterate from last parent to first", "for", "parent", "in", "range", "(", "last_parent", ",", "-", "1", ",", "-", "1", ")", ":", "current_parent", "=", "parent", "# Iterate from current_parent to last_parent", "while", "current_parent", "<=", "last_parent", ":", "# Find greatest child of current_parent", "child", "=", "2", "*", "current_parent", "+", "1", "if", "child", "+", "1", "<=", "end", "and", "arr", "[", "child", "]", "<", "arr", "[", "child", "+", "1", "]", ":", "child", "=", "child", "+", "1", "# Swap if child is greater than parent", "if", "arr", "[", "child", "]", ">", "arr", "[", "current_parent", "]", ":", "arr", "[", "current_parent", "]", ",", "arr", "[", "child", "]", "=", "arr", "[", "child", "]", ",", "arr", "[", "current_parent", "]", "current_parent", "=", "child", "if", "simulation", ":", "iteration", "=", "iteration", "+", "1", "print", "(", "\"iteration\"", ",", "iteration", ",", "\":\"", ",", "*", "arr", ")", "# If no swap occured, no need to keep iterating", "else", ":", "break", "arr", "[", "0", "]", ",", "arr", "[", "end", "]", "=", "arr", "[", "end", "]", ",", "arr", "[", "0", "]", "return", "iteration" ]
37.071429
13.821429
def _convert_indirect_jump_targets_to_states(job, indirect_jump_targets): """ Convert each concrete indirect jump target into a SimState. :param job: The CFGJob instance. :param indirect_jump_targets: A collection of concrete jump targets resolved from a indirect jump. :return: A list of SimStates. :rtype: list """ successors = [ ] for t in indirect_jump_targets: # Insert new successors a = job.sim_successors.all_successors[0].copy() a.ip = t successors.append(a) return successors
[ "def", "_convert_indirect_jump_targets_to_states", "(", "job", ",", "indirect_jump_targets", ")", ":", "successors", "=", "[", "]", "for", "t", "in", "indirect_jump_targets", ":", "# Insert new successors", "a", "=", "job", ".", "sim_successors", ".", "all_successors", "[", "0", "]", ".", "copy", "(", ")", "a", ".", "ip", "=", "t", "successors", ".", "append", "(", "a", ")", "return", "successors" ]
39.294118
19.764706
def write(self, outfile):
    """
    Save the likelihood results as a sparse HEALPix map.

    Writes a partial HEALPix map (PIXEL plus per-pixel likelihood columns)
    followed by a DISTANCE_MODULUS extension to `outfile`.
    """
    data = odict()
    data['PIXEL']=self.roi.pixels_target
    # Full data output (too large for survey)
    if self.config['scan']['full_pdf']:
        data['LOG_LIKELIHOOD']=self.log_likelihood_sparse_array.T
        data['RICHNESS']=self.richness_sparse_array.T
        data['RICHNESS_LOWER']=self.richness_lower_sparse_array.T
        data['RICHNESS_UPPER']=self.richness_upper_sparse_array.T
        data['RICHNESS_LIMIT']=self.richness_upper_limit_sparse_array.T
        #data['STELLAR_MASS']=self.stellar_mass_sparse_array.T
        data['FRACTION_OBSERVABLE']=self.fraction_observable_sparse_array.T
    else:
        # Reduced output: only the columns needed downstream.
        data['LOG_LIKELIHOOD']=self.log_likelihood_sparse_array.T
        data['RICHNESS']=self.richness_sparse_array.T
        data['FRACTION_OBSERVABLE']=self.fraction_observable_sparse_array.T

    # Convert to 32bit float (skip the first key, PIXEL, which stays integer)
    for k in list(data.keys())[1:]:
        data[k] = data[k].astype('f4',copy=False)

    # Stellar mass can be calculated from STELLAR * RICHNESS
    header = odict()
    header['STELLAR']=round(self.stellar_mass_conversion,8)
    header['LKDNSIDE']=self.config['coords']['nside_likelihood']
    header['LKDPIX']=ang2pix(self.config['coords']['nside_likelihood'],
                             self.roi.lon,self.roi.lat)
    # Catalog-object counts in the various ROI regions, for bookkeeping.
    header['NROI']=self.roi.inROI(self.loglike.catalog_roi.lon,
                                  self.loglike.catalog_roi.lat).sum()
    header['NANNULUS']=self.roi.inAnnulus(self.loglike.catalog_roi.lon,
                                          self.loglike.catalog_roi.lat).sum()
    header['NINSIDE']=self.roi.inInterior(self.loglike.catalog_roi.lon,
                                          self.loglike.catalog_roi.lat).sum()
    header['NTARGET']=self.roi.inTarget(self.loglike.catalog_roi.lon,
                                        self.loglike.catalog_roi.lat).sum()

    # Flatten if there is only a single distance modulus
    # ADW: Is this really what we want to do?
    if len(self.distance_modulus_array) == 1:
        for key in data:
            data[key] = data[key].flatten()

    logger.info("Writing %s..."%outfile)
    write_partial_map(outfile,data,
                      nside=self.config['coords']['nside_pixel'],
                      header=header,
                      clobber=True
                      )
    # Append the distance-modulus grid as its own FITS extension.
    fitsio.write(outfile,
                 dict(DISTANCE_MODULUS=self.distance_modulus_array.astype('f4',copy=False)),
                 extname='DISTANCE_MODULUS',
                 clobber=False)
[ "def", "write", "(", "self", ",", "outfile", ")", ":", "data", "=", "odict", "(", ")", "data", "[", "'PIXEL'", "]", "=", "self", ".", "roi", ".", "pixels_target", "# Full data output (too large for survey)", "if", "self", ".", "config", "[", "'scan'", "]", "[", "'full_pdf'", "]", ":", "data", "[", "'LOG_LIKELIHOOD'", "]", "=", "self", ".", "log_likelihood_sparse_array", ".", "T", "data", "[", "'RICHNESS'", "]", "=", "self", ".", "richness_sparse_array", ".", "T", "data", "[", "'RICHNESS_LOWER'", "]", "=", "self", ".", "richness_lower_sparse_array", ".", "T", "data", "[", "'RICHNESS_UPPER'", "]", "=", "self", ".", "richness_upper_sparse_array", ".", "T", "data", "[", "'RICHNESS_LIMIT'", "]", "=", "self", ".", "richness_upper_limit_sparse_array", ".", "T", "#data['STELLAR_MASS']=self.stellar_mass_sparse_array.T", "data", "[", "'FRACTION_OBSERVABLE'", "]", "=", "self", ".", "fraction_observable_sparse_array", ".", "T", "else", ":", "data", "[", "'LOG_LIKELIHOOD'", "]", "=", "self", ".", "log_likelihood_sparse_array", ".", "T", "data", "[", "'RICHNESS'", "]", "=", "self", ".", "richness_sparse_array", ".", "T", "data", "[", "'FRACTION_OBSERVABLE'", "]", "=", "self", ".", "fraction_observable_sparse_array", ".", "T", "# Convert to 32bit float", "for", "k", "in", "list", "(", "data", ".", "keys", "(", ")", ")", "[", "1", ":", "]", ":", "data", "[", "k", "]", "=", "data", "[", "k", "]", ".", "astype", "(", "'f4'", ",", "copy", "=", "False", ")", "# Stellar mass can be calculated from STELLAR * RICHNESS", "header", "=", "odict", "(", ")", "header", "[", "'STELLAR'", "]", "=", "round", "(", "self", ".", "stellar_mass_conversion", ",", "8", ")", "header", "[", "'LKDNSIDE'", "]", "=", "self", ".", "config", "[", "'coords'", "]", "[", "'nside_likelihood'", "]", "header", "[", "'LKDPIX'", "]", "=", "ang2pix", "(", "self", ".", "config", "[", "'coords'", "]", "[", "'nside_likelihood'", "]", ",", "self", ".", "roi", ".", "lon", ",", "self", ".", "roi", ".", "lat", ")", 
"header", "[", "'NROI'", "]", "=", "self", ".", "roi", ".", "inROI", "(", "self", ".", "loglike", ".", "catalog_roi", ".", "lon", ",", "self", ".", "loglike", ".", "catalog_roi", ".", "lat", ")", ".", "sum", "(", ")", "header", "[", "'NANNULUS'", "]", "=", "self", ".", "roi", ".", "inAnnulus", "(", "self", ".", "loglike", ".", "catalog_roi", ".", "lon", ",", "self", ".", "loglike", ".", "catalog_roi", ".", "lat", ")", ".", "sum", "(", ")", "header", "[", "'NINSIDE'", "]", "=", "self", ".", "roi", ".", "inInterior", "(", "self", ".", "loglike", ".", "catalog_roi", ".", "lon", ",", "self", ".", "loglike", ".", "catalog_roi", ".", "lat", ")", ".", "sum", "(", ")", "header", "[", "'NTARGET'", "]", "=", "self", ".", "roi", ".", "inTarget", "(", "self", ".", "loglike", ".", "catalog_roi", ".", "lon", ",", "self", ".", "loglike", ".", "catalog_roi", ".", "lat", ")", ".", "sum", "(", ")", "# Flatten if there is only a single distance modulus", "# ADW: Is this really what we want to do?", "if", "len", "(", "self", ".", "distance_modulus_array", ")", "==", "1", ":", "for", "key", "in", "data", ":", "data", "[", "key", "]", "=", "data", "[", "key", "]", ".", "flatten", "(", ")", "logger", ".", "info", "(", "\"Writing %s...\"", "%", "outfile", ")", "write_partial_map", "(", "outfile", ",", "data", ",", "nside", "=", "self", ".", "config", "[", "'coords'", "]", "[", "'nside_pixel'", "]", ",", "header", "=", "header", ",", "clobber", "=", "True", ")", "fitsio", ".", "write", "(", "outfile", ",", "dict", "(", "DISTANCE_MODULUS", "=", "self", ".", "distance_modulus_array", ".", "astype", "(", "'f4'", ",", "copy", "=", "False", ")", ")", ",", "extname", "=", "'DISTANCE_MODULUS'", ",", "clobber", "=", "False", ")" ]
49.392857
21.464286
def _sanity_check_every_location_is_marked(ir_blocks): """Ensure that every new location is marked with a MarkLocation block.""" # Exactly one MarkLocation block is found between any block that starts an interval of blocks # that all affect the same query position, and the first subsequent block that affects a # different position in the query. Such intervals include the following examples: # - from Fold to Unfold # - from QueryRoot to Traverse/Recurse # - from one Traverse to the next Traverse # - from Traverse to Backtrack found_start_block = False mark_location_blocks_count = 0 start_interval_types = (QueryRoot, Traverse, Recurse, Fold) end_interval_types = (Backtrack, ConstructResult, Recurse, Traverse, Unfold) for block in ir_blocks: # Terminate started intervals before opening new ones. if isinstance(block, end_interval_types) and found_start_block: found_start_block = False if mark_location_blocks_count != 1: raise AssertionError(u'Expected 1 MarkLocation block between traversals, found: ' u'{} {}'.format(mark_location_blocks_count, ir_blocks)) # Now consider opening new intervals or processing MarkLocation blocks. if isinstance(block, MarkLocation): mark_location_blocks_count += 1 elif isinstance(block, start_interval_types): found_start_block = True mark_location_blocks_count = 0
[ "def", "_sanity_check_every_location_is_marked", "(", "ir_blocks", ")", ":", "# Exactly one MarkLocation block is found between any block that starts an interval of blocks", "# that all affect the same query position, and the first subsequent block that affects a", "# different position in the query. Such intervals include the following examples:", "# - from Fold to Unfold", "# - from QueryRoot to Traverse/Recurse", "# - from one Traverse to the next Traverse", "# - from Traverse to Backtrack", "found_start_block", "=", "False", "mark_location_blocks_count", "=", "0", "start_interval_types", "=", "(", "QueryRoot", ",", "Traverse", ",", "Recurse", ",", "Fold", ")", "end_interval_types", "=", "(", "Backtrack", ",", "ConstructResult", ",", "Recurse", ",", "Traverse", ",", "Unfold", ")", "for", "block", "in", "ir_blocks", ":", "# Terminate started intervals before opening new ones.", "if", "isinstance", "(", "block", ",", "end_interval_types", ")", "and", "found_start_block", ":", "found_start_block", "=", "False", "if", "mark_location_blocks_count", "!=", "1", ":", "raise", "AssertionError", "(", "u'Expected 1 MarkLocation block between traversals, found: '", "u'{} {}'", ".", "format", "(", "mark_location_blocks_count", ",", "ir_blocks", ")", ")", "# Now consider opening new intervals or processing MarkLocation blocks.", "if", "isinstance", "(", "block", ",", "MarkLocation", ")", ":", "mark_location_blocks_count", "+=", "1", "elif", "isinstance", "(", "block", ",", "start_interval_types", ")", ":", "found_start_block", "=", "True", "mark_location_blocks_count", "=", "0" ]
51.344828
22.206897
def check_type(self, value): """Hook for type-checking, invoked during assignment. raises TypeError if neither value nor self.dtype are None and they do not match. will not raise an exception if either value or self.dtype is None """ if self.__dict__['dtype'] is None: return elif value is None: return elif isinstance(value, self.__dict__['dtype']): return msg = "Value of type %s, when %s was expected." % ( type(value), self.__dict__['dtype']) raise TypeError(msg)
[ "def", "check_type", "(", "self", ",", "value", ")", ":", "if", "self", ".", "__dict__", "[", "'dtype'", "]", "is", "None", ":", "return", "elif", "value", "is", "None", ":", "return", "elif", "isinstance", "(", "value", ",", "self", ".", "__dict__", "[", "'dtype'", "]", ")", ":", "return", "msg", "=", "\"Value of type %s, when %s was expected.\"", "%", "(", "type", "(", "value", ")", ",", "self", ".", "__dict__", "[", "'dtype'", "]", ")", "raise", "TypeError", "(", "msg", ")" ]
34.176471
18.411765
def iter_all_repos(self, number=-1, since=None, etag=None, per_page=None): """Iterate over every repository in the order they were created. :param int number: (optional), number of repositories to return. Default: -1, returns all of them :param int since: (optional), last repository id seen (allows restarting this iteration) :param str etag: (optional), ETag from a previous request to the same endpoint :param int per_page: (optional), number of repositories to list per request :returns: generator of :class:`Repository <github3.repos.Repository>` """ url = self._build_url('repositories') return self._iter(int(number), url, Repository, params={'since': since, 'per_page': per_page}, etag=etag)
[ "def", "iter_all_repos", "(", "self", ",", "number", "=", "-", "1", ",", "since", "=", "None", ",", "etag", "=", "None", ",", "per_page", "=", "None", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'repositories'", ")", "return", "self", ".", "_iter", "(", "int", "(", "number", ")", ",", "url", ",", "Repository", ",", "params", "=", "{", "'since'", ":", "since", ",", "'per_page'", ":", "per_page", "}", ",", "etag", "=", "etag", ")" ]
50.352941
20.411765
def _base_placeholder(self): """ Return the notes master placeholder this notes slide placeholder inherits from, or |None| if no placeholder of the matching type is present. """ notes_master = self.part.notes_master ph_type = self.element.ph_type return notes_master.placeholders.get(ph_type=ph_type)
[ "def", "_base_placeholder", "(", "self", ")", ":", "notes_master", "=", "self", ".", "part", ".", "notes_master", "ph_type", "=", "self", ".", "element", ".", "ph_type", "return", "notes_master", ".", "placeholders", ".", "get", "(", "ph_type", "=", "ph_type", ")" ]
39.555556
14.444444
def converter(input_string, block_size=2): """ The cli tool as a built-in function. :param input_string: A string that should be converted to a set of facts. :type input_string: str. :param blocks_size: Optional block size of sentences (Default: 2). :type block_size: int. """ sentences = textprocessing.getSentences(input_string) blocks = textprocessing.getBlocks(sentences, block_size) parse.makeIdentifiers(blocks)
[ "def", "converter", "(", "input_string", ",", "block_size", "=", "2", ")", ":", "sentences", "=", "textprocessing", ".", "getSentences", "(", "input_string", ")", "blocks", "=", "textprocessing", ".", "getBlocks", "(", "sentences", ",", "block_size", ")", "parse", ".", "makeIdentifiers", "(", "blocks", ")" ]
34.384615
16.846154
def calculate_checksum_on_bytes( b, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM ): """Calculate the checksum of ``bytes``. Warning: This method requires the entire object to be buffered in (virtual) memory, which should normally be avoided in production code. Args: b: bytes Raw bytes algorithm: str Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``. Returns: str : Checksum as a hexadecimal string, with length decided by the algorithm. """ checksum_calc = get_checksum_calculator_by_dataone_designator(algorithm) checksum_calc.update(b) return checksum_calc.hexdigest()
[ "def", "calculate_checksum_on_bytes", "(", "b", ",", "algorithm", "=", "d1_common", ".", "const", ".", "DEFAULT_CHECKSUM_ALGORITHM", ")", ":", "checksum_calc", "=", "get_checksum_calculator_by_dataone_designator", "(", "algorithm", ")", "checksum_calc", ".", "update", "(", "b", ")", "return", "checksum_calc", ".", "hexdigest", "(", ")" ]
29.090909
26
def get_string(self, significant_figures=6): """ Returns the string representation of simulation box in LAMMPS data file format. Args: significant_figures (int): No. of significant figures to output for box settings. Default to 6. Returns: String representation """ ph = "{:.%df}" % significant_figures lines = [] for bound, d in zip(self.bounds, "xyz"): fillers = bound + [d] * 2 bound_format = " ".join([ph] * 2 + [" {}lo {}hi"]) lines.append(bound_format.format(*fillers)) if self.tilt: tilt_format = " ".join([ph] * 3 + [" xy xz yz"]) lines.append(tilt_format.format(*self.tilt)) return "\n".join(lines)
[ "def", "get_string", "(", "self", ",", "significant_figures", "=", "6", ")", ":", "ph", "=", "\"{:.%df}\"", "%", "significant_figures", "lines", "=", "[", "]", "for", "bound", ",", "d", "in", "zip", "(", "self", ".", "bounds", ",", "\"xyz\"", ")", ":", "fillers", "=", "bound", "+", "[", "d", "]", "*", "2", "bound_format", "=", "\" \"", ".", "join", "(", "[", "ph", "]", "*", "2", "+", "[", "\" {}lo {}hi\"", "]", ")", "lines", ".", "append", "(", "bound_format", ".", "format", "(", "*", "fillers", ")", ")", "if", "self", ".", "tilt", ":", "tilt_format", "=", "\" \"", ".", "join", "(", "[", "ph", "]", "*", "3", "+", "[", "\" xy xz yz\"", "]", ")", "lines", ".", "append", "(", "tilt_format", ".", "format", "(", "*", "self", ".", "tilt", ")", ")", "return", "\"\\n\"", ".", "join", "(", "lines", ")" ]
33.73913
17.652174
def reset(self): """ Reset the state of the sandbox. http://docs.fiesta.cc/sandbox.html#post--reset """ path = 'reset' request_data = {} # Need to put data into the request to force urllib2 to make it a POST request response_data = self.request(path, request_data) success = response_data['reset'] # True of False return success
[ "def", "reset", "(", "self", ")", ":", "path", "=", "'reset'", "request_data", "=", "{", "}", "# Need to put data into the request to force urllib2 to make it a POST request", "response_data", "=", "self", ".", "request", "(", "path", ",", "request_data", ")", "success", "=", "response_data", "[", "'reset'", "]", "# True of False", "return", "success" ]
39.5
17.5
def normalize_example_nlp(task, example, is_infer, vocab_type, vocab_offset, max_input_length, max_target_length, fixed_train_length): """Normalize the examples from different tasks so they can be merged. This function is specific to NLP tasks and normalizes them so that in the end the example only has "targets" and "task_id". For tasks that originally have inputs, this is done by appending task_id to the inputs and prepending targets, so normalized_targets = inputs task_id targets. For classification tasks, targets are constructed by spelling out the class. Args: task: the Problem class of the task we are normalizing. example: a dictionary of tensors, the example to normalize. is_infer: bool, whether we are performing inference or not. vocab_type: the type of vocabulary in use. vocab_offset: integer, offset index for subword vocabularies. max_input_length: maximum length to cut inputs to. max_target_length: maximum length to cut targets to. fixed_train_length: set length to this size if > 0. Returns: a dictionary of tensors, like example, after normalizing, which in this case means that it only has "targets" and "task_id" as feature. """ if task.has_inputs: example["inputs"] = example["inputs"][:-1] # remove EOS token if hasattr(task, "class_labels"): if vocab_type == text_problems.VocabType.CHARACTER: # TODO(urvashik): handle the case where num_labels > 9 example["targets"] = tf.cast(discretization.int_to_bit( example["targets"], 1, base=10) + 50, tf.int64) example["targets"] = tf.squeeze(example["targets"], axis=[-1]) elif vocab_type == text_problems.VocabType.SUBWORD: example["targets"] = vocab_offset + example["targets"] else: # sequence with inputs and targets eg: summarization if task.has_inputs: if max_input_length > 0: example["inputs"] = example["inputs"][:max_input_length] # Do not truncate targets during inference with beam decoding. 
if max_target_length > 0 and not is_infer: example["targets"] = example["targets"][:max_target_length] def make_constant_shape(x, size): x = x[:size] xlen = tf.shape(x)[0] x = tf.pad(x, [[0, size - xlen]]) return tf.reshape(x, [size]) if task.has_inputs: if is_infer: concat_list = [example["inputs"], [task.task_id]] example["inputs"] = tf.concat(concat_list, axis=0) else: inputs = example.pop("inputs") concat_list = [inputs, [task.task_id], example["targets"]] example["targets"] = tf.concat(concat_list, axis=0) if fixed_train_length > 0: example["targets"] = make_constant_shape( example["targets"], fixed_train_length) else: concat_list = [[task.task_id], example["targets"]] example["targets"] = tf.concat(concat_list, axis=0) if not is_infer and fixed_train_length > 0: example["targets"] = make_constant_shape( example["targets"], fixed_train_length) example["task_id"] = tf.constant([task.task_id], dtype=tf.int64) return example
[ "def", "normalize_example_nlp", "(", "task", ",", "example", ",", "is_infer", ",", "vocab_type", ",", "vocab_offset", ",", "max_input_length", ",", "max_target_length", ",", "fixed_train_length", ")", ":", "if", "task", ".", "has_inputs", ":", "example", "[", "\"inputs\"", "]", "=", "example", "[", "\"inputs\"", "]", "[", ":", "-", "1", "]", "# remove EOS token", "if", "hasattr", "(", "task", ",", "\"class_labels\"", ")", ":", "if", "vocab_type", "==", "text_problems", ".", "VocabType", ".", "CHARACTER", ":", "# TODO(urvashik): handle the case where num_labels > 9", "example", "[", "\"targets\"", "]", "=", "tf", ".", "cast", "(", "discretization", ".", "int_to_bit", "(", "example", "[", "\"targets\"", "]", ",", "1", ",", "base", "=", "10", ")", "+", "50", ",", "tf", ".", "int64", ")", "example", "[", "\"targets\"", "]", "=", "tf", ".", "squeeze", "(", "example", "[", "\"targets\"", "]", ",", "axis", "=", "[", "-", "1", "]", ")", "elif", "vocab_type", "==", "text_problems", ".", "VocabType", ".", "SUBWORD", ":", "example", "[", "\"targets\"", "]", "=", "vocab_offset", "+", "example", "[", "\"targets\"", "]", "else", ":", "# sequence with inputs and targets eg: summarization", "if", "task", ".", "has_inputs", ":", "if", "max_input_length", ">", "0", ":", "example", "[", "\"inputs\"", "]", "=", "example", "[", "\"inputs\"", "]", "[", ":", "max_input_length", "]", "# Do not truncate targets during inference with beam decoding.", "if", "max_target_length", ">", "0", "and", "not", "is_infer", ":", "example", "[", "\"targets\"", "]", "=", "example", "[", "\"targets\"", "]", "[", ":", "max_target_length", "]", "def", "make_constant_shape", "(", "x", ",", "size", ")", ":", "x", "=", "x", "[", ":", "size", "]", "xlen", "=", "tf", ".", "shape", "(", "x", ")", "[", "0", "]", "x", "=", "tf", ".", "pad", "(", "x", ",", "[", "[", "0", ",", "size", "-", "xlen", "]", "]", ")", "return", "tf", ".", "reshape", "(", "x", ",", "[", "size", "]", ")", "if", "task", ".", "has_inputs", 
":", "if", "is_infer", ":", "concat_list", "=", "[", "example", "[", "\"inputs\"", "]", ",", "[", "task", ".", "task_id", "]", "]", "example", "[", "\"inputs\"", "]", "=", "tf", ".", "concat", "(", "concat_list", ",", "axis", "=", "0", ")", "else", ":", "inputs", "=", "example", ".", "pop", "(", "\"inputs\"", ")", "concat_list", "=", "[", "inputs", ",", "[", "task", ".", "task_id", "]", ",", "example", "[", "\"targets\"", "]", "]", "example", "[", "\"targets\"", "]", "=", "tf", ".", "concat", "(", "concat_list", ",", "axis", "=", "0", ")", "if", "fixed_train_length", ">", "0", ":", "example", "[", "\"targets\"", "]", "=", "make_constant_shape", "(", "example", "[", "\"targets\"", "]", ",", "fixed_train_length", ")", "else", ":", "concat_list", "=", "[", "[", "task", ".", "task_id", "]", ",", "example", "[", "\"targets\"", "]", "]", "example", "[", "\"targets\"", "]", "=", "tf", ".", "concat", "(", "concat_list", ",", "axis", "=", "0", ")", "if", "not", "is_infer", "and", "fixed_train_length", ">", "0", ":", "example", "[", "\"targets\"", "]", "=", "make_constant_shape", "(", "example", "[", "\"targets\"", "]", ",", "fixed_train_length", ")", "example", "[", "\"task_id\"", "]", "=", "tf", ".", "constant", "(", "[", "task", ".", "task_id", "]", ",", "dtype", "=", "tf", ".", "int64", ")", "return", "example" ]
43.352113
21.014085
def exec_output(cls, command, shell=True, encoding='utf-8'): """ Return execution output :param encoding: charset used to decode the stdout :type encoding: str :return: the return of the command :rtype: unicode string """ proc = Popen(command, shell=shell, stdout=PIPE) stdout, _stderr = proc.communicate() if proc.returncode == 0: return stdout.decode(encoding) return ''
[ "def", "exec_output", "(", "cls", ",", "command", ",", "shell", "=", "True", ",", "encoding", "=", "'utf-8'", ")", ":", "proc", "=", "Popen", "(", "command", ",", "shell", "=", "shell", ",", "stdout", "=", "PIPE", ")", "stdout", ",", "_stderr", "=", "proc", ".", "communicate", "(", ")", "if", "proc", ".", "returncode", "==", "0", ":", "return", "stdout", ".", "decode", "(", "encoding", ")", "return", "''" ]
30.2
15.666667
def setup_menu_actions(self): """Setup and update the menu actions.""" self.recent_project_menu.clear() self.recent_projects_actions = [] if self.recent_projects: for project in self.recent_projects: if self.is_valid_project(project): name = project.replace(get_home_dir(), '~') action = create_action( self, name, icon=ima.icon('project'), triggered=( lambda _, p=project: self.open_project(path=p)) ) self.recent_projects_actions.append(action) else: self.recent_projects.remove(project) self.recent_projects_actions += [None, self.clear_recent_projects_action] else: self.recent_projects_actions = [self.clear_recent_projects_action] add_actions(self.recent_project_menu, self.recent_projects_actions) self.update_project_actions()
[ "def", "setup_menu_actions", "(", "self", ")", ":", "self", ".", "recent_project_menu", ".", "clear", "(", ")", "self", ".", "recent_projects_actions", "=", "[", "]", "if", "self", ".", "recent_projects", ":", "for", "project", "in", "self", ".", "recent_projects", ":", "if", "self", ".", "is_valid_project", "(", "project", ")", ":", "name", "=", "project", ".", "replace", "(", "get_home_dir", "(", ")", ",", "'~'", ")", "action", "=", "create_action", "(", "self", ",", "name", ",", "icon", "=", "ima", ".", "icon", "(", "'project'", ")", ",", "triggered", "=", "(", "lambda", "_", ",", "p", "=", "project", ":", "self", ".", "open_project", "(", "path", "=", "p", ")", ")", ")", "self", ".", "recent_projects_actions", ".", "append", "(", "action", ")", "else", ":", "self", ".", "recent_projects", ".", "remove", "(", "project", ")", "self", ".", "recent_projects_actions", "+=", "[", "None", ",", "self", ".", "clear_recent_projects_action", "]", "else", ":", "self", ".", "recent_projects_actions", "=", "[", "self", ".", "clear_recent_projects_action", "]", "add_actions", "(", "self", ".", "recent_project_menu", ",", "self", ".", "recent_projects_actions", ")", "self", ".", "update_project_actions", "(", ")" ]
47.125
15.25
def _split_covariance_into_marginals(covariance, block_sizes): """Split a covariance matrix into block-diagonal marginals of given sizes.""" start_dim = 0 marginals = [] for size in block_sizes: end_dim = start_dim + size marginals.append(covariance[..., start_dim:end_dim, start_dim:end_dim]) start_dim = end_dim return marginals
[ "def", "_split_covariance_into_marginals", "(", "covariance", ",", "block_sizes", ")", ":", "start_dim", "=", "0", "marginals", "=", "[", "]", "for", "size", "in", "block_sizes", ":", "end_dim", "=", "start_dim", "+", "size", "marginals", ".", "append", "(", "covariance", "[", "...", ",", "start_dim", ":", "end_dim", ",", "start_dim", ":", "end_dim", "]", ")", "start_dim", "=", "end_dim", "return", "marginals" ]
38.222222
18.777778
def set_quiet(mres, parent, global_options): """ Sets the 'quiet' property on the MultiResult """ quiet = global_options.get('quiet') if quiet is not None: mres._quiet = quiet else: mres._quiet = parent.quiet
[ "def", "set_quiet", "(", "mres", ",", "parent", ",", "global_options", ")", ":", "quiet", "=", "global_options", ".", "get", "(", "'quiet'", ")", "if", "quiet", "is", "not", "None", ":", "mres", ".", "_quiet", "=", "quiet", "else", ":", "mres", ".", "_quiet", "=", "parent", ".", "quiet" ]
26.666667
8.666667
def search_url(obj, url_data, pageno, seen_objs): """Recurse through a PDF object, searching for URLs.""" if isinstance(obj, PDFObjRef): if obj.objid in seen_objs: # prevent recursive loops return seen_objs.add(obj.objid) obj = obj.resolve() if isinstance(obj, dict): for key, value in obj.items(): if key == 'URI' and isinstance(value, basestring): # URIs should be 7bit ASCII encoded, but be safe and encode # to unicode # XXX this does not use an optional specified base URL url = strformat.unicode_safe(value) url_data.add_url(url, page=pageno) else: search_url(value, url_data, pageno, seen_objs) elif isinstance(obj, list): for elem in obj: search_url(elem, url_data, pageno, seen_objs) elif isinstance(obj, PDFStream): search_url(obj.attrs, url_data, pageno, seen_objs)
[ "def", "search_url", "(", "obj", ",", "url_data", ",", "pageno", ",", "seen_objs", ")", ":", "if", "isinstance", "(", "obj", ",", "PDFObjRef", ")", ":", "if", "obj", ".", "objid", "in", "seen_objs", ":", "# prevent recursive loops", "return", "seen_objs", ".", "add", "(", "obj", ".", "objid", ")", "obj", "=", "obj", ".", "resolve", "(", ")", "if", "isinstance", "(", "obj", ",", "dict", ")", ":", "for", "key", ",", "value", "in", "obj", ".", "items", "(", ")", ":", "if", "key", "==", "'URI'", "and", "isinstance", "(", "value", ",", "basestring", ")", ":", "# URIs should be 7bit ASCII encoded, but be safe and encode", "# to unicode", "# XXX this does not use an optional specified base URL", "url", "=", "strformat", ".", "unicode_safe", "(", "value", ")", "url_data", ".", "add_url", "(", "url", ",", "page", "=", "pageno", ")", "else", ":", "search_url", "(", "value", ",", "url_data", ",", "pageno", ",", "seen_objs", ")", "elif", "isinstance", "(", "obj", ",", "list", ")", ":", "for", "elem", "in", "obj", ":", "search_url", "(", "elem", ",", "url_data", ",", "pageno", ",", "seen_objs", ")", "elif", "isinstance", "(", "obj", ",", "PDFStream", ")", ":", "search_url", "(", "obj", ".", "attrs", ",", "url_data", ",", "pageno", ",", "seen_objs", ")" ]
42.521739
13.434783
def insert( self, table_name, obj=None, database=None, overwrite=False, partition=None, values=None, validate=True, ): """ Insert into existing table. See ImpalaTable.insert for other parameters. Parameters ---------- table_name : string database : string, default None Examples -------- >>> table = 'my_table' >>> con.insert(table, table_expr) # doctest: +SKIP # Completely overwrite contents >>> con.insert(table, table_expr, overwrite=True) # doctest: +SKIP """ table = self.table(table_name, database=database) return table.insert( obj=obj, overwrite=overwrite, partition=partition, values=values, validate=validate, )
[ "def", "insert", "(", "self", ",", "table_name", ",", "obj", "=", "None", ",", "database", "=", "None", ",", "overwrite", "=", "False", ",", "partition", "=", "None", ",", "values", "=", "None", ",", "validate", "=", "True", ",", ")", ":", "table", "=", "self", ".", "table", "(", "table_name", ",", "database", "=", "database", ")", "return", "table", ".", "insert", "(", "obj", "=", "obj", ",", "overwrite", "=", "overwrite", ",", "partition", "=", "partition", ",", "values", "=", "values", ",", "validate", "=", "validate", ",", ")" ]
23.805556
19.194444
def _merge_single_runs(self, other_trajectory, used_runs): """ Updates the `run_information` of the current trajectory.""" count = len(self) # Variable to count the increasing new run indices and create # new run names run_indices = range(len(other_trajectory)) run_name_dict = OrderedDict() to_store_groups_with_annotations = [] for idx in run_indices: # Iterate through all used runs and store annotated groups and mark results and # derived parameters for merging if idx in used_runs: # Update the run information dict of the current trajectory other_info_dict = other_trajectory.f_get_run_information(idx) time_ = other_info_dict['time'] timestamp = other_info_dict['timestamp'] completed = other_info_dict['completed'] short_environment_hexsha = other_info_dict['short_environment_hexsha'] finish_timestamp = other_info_dict['finish_timestamp'] runtime = other_info_dict['runtime'] new_idx = used_runs[idx] new_runname = self.f_wildcard('$', new_idx) run_name_dict[idx] = new_runname info_dict = dict( idx=new_idx, time=time_, timestamp=timestamp, completed=completed, short_environment_hexsha=short_environment_hexsha, finish_timestamp=finish_timestamp, runtime=runtime) self._add_run_info(**info_dict)
[ "def", "_merge_single_runs", "(", "self", ",", "other_trajectory", ",", "used_runs", ")", ":", "count", "=", "len", "(", "self", ")", "# Variable to count the increasing new run indices and create", "# new run names", "run_indices", "=", "range", "(", "len", "(", "other_trajectory", ")", ")", "run_name_dict", "=", "OrderedDict", "(", ")", "to_store_groups_with_annotations", "=", "[", "]", "for", "idx", "in", "run_indices", ":", "# Iterate through all used runs and store annotated groups and mark results and", "# derived parameters for merging", "if", "idx", "in", "used_runs", ":", "# Update the run information dict of the current trajectory", "other_info_dict", "=", "other_trajectory", ".", "f_get_run_information", "(", "idx", ")", "time_", "=", "other_info_dict", "[", "'time'", "]", "timestamp", "=", "other_info_dict", "[", "'timestamp'", "]", "completed", "=", "other_info_dict", "[", "'completed'", "]", "short_environment_hexsha", "=", "other_info_dict", "[", "'short_environment_hexsha'", "]", "finish_timestamp", "=", "other_info_dict", "[", "'finish_timestamp'", "]", "runtime", "=", "other_info_dict", "[", "'runtime'", "]", "new_idx", "=", "used_runs", "[", "idx", "]", "new_runname", "=", "self", ".", "f_wildcard", "(", "'$'", ",", "new_idx", ")", "run_name_dict", "[", "idx", "]", "=", "new_runname", "info_dict", "=", "dict", "(", "idx", "=", "new_idx", ",", "time", "=", "time_", ",", "timestamp", "=", "timestamp", ",", "completed", "=", "completed", ",", "short_environment_hexsha", "=", "short_environment_hexsha", ",", "finish_timestamp", "=", "finish_timestamp", ",", "runtime", "=", "runtime", ")", "self", ".", "_add_run_info", "(", "*", "*", "info_dict", ")" ]
41.538462
20.461538
def getProjectionRaw(self, eEye): """ The components necessary to build your own projection matrix in case your application is doing something fancy like infinite Z """ fn = self.function_table.getProjectionRaw pfLeft = c_float() pfRight = c_float() pfTop = c_float() pfBottom = c_float() fn(eEye, byref(pfLeft), byref(pfRight), byref(pfTop), byref(pfBottom)) return pfLeft.value, pfRight.value, pfTop.value, pfBottom.value
[ "def", "getProjectionRaw", "(", "self", ",", "eEye", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getProjectionRaw", "pfLeft", "=", "c_float", "(", ")", "pfRight", "=", "c_float", "(", ")", "pfTop", "=", "c_float", "(", ")", "pfBottom", "=", "c_float", "(", ")", "fn", "(", "eEye", ",", "byref", "(", "pfLeft", ")", ",", "byref", "(", "pfRight", ")", ",", "byref", "(", "pfTop", ")", ",", "byref", "(", "pfBottom", ")", ")", "return", "pfLeft", ".", "value", ",", "pfRight", ".", "value", ",", "pfTop", ".", "value", ",", "pfBottom", ".", "value" ]
38.461538
18.461538
def vec(self): """:obj:`numpy.ndarray` : Vector representation for this camera. """ return np.r_[self.fx, self.fy, self.cx, self.cy, self.skew, self.height, self.width]
[ "def", "vec", "(", "self", ")", ":", "return", "np", ".", "r_", "[", "self", ".", "fx", ",", "self", ".", "fy", ",", "self", ".", "cx", ",", "self", ".", "cy", ",", "self", ".", "skew", ",", "self", ".", "height", ",", "self", ".", "width", "]" ]
47.25
19.5
def parse_stations(html): """ Strips JS code, loads JSON """ html = html.replace('SLs.sls=', '').replace(';SLs.showSuggestion();', '') html = json.loads(html) return html['suggestions']
[ "def", "parse_stations", "(", "html", ")", ":", "html", "=", "html", ".", "replace", "(", "'SLs.sls='", ",", "''", ")", ".", "replace", "(", "';SLs.showSuggestion();'", ",", "''", ")", "html", "=", "json", ".", "loads", "(", "html", ")", "return", "html", "[", "'suggestions'", "]" ]
29.571429
11.571429
def gesture(self, start1, start2, *args, **kwargs): ''' perform two point gesture. Usage: d().gesture(startPoint1, startPoint2).to(endPoint1, endPoint2, steps) d().gesture(startPoint1, startPoint2, endPoint1, endPoint2, steps) ''' def to(obj_self, end1, end2, steps=100): ctp = lambda pt: point(*pt) if type(pt) == tuple else pt # convert tuple to point s1, s2, e1, e2 = ctp(start1), ctp(start2), ctp(end1), ctp(end2) return self.jsonrpc.gesture(self.selector, s1, s2, e1, e2, steps) obj = type("Gesture", (object,), {"to": to})() return obj if len(args) == 0 else to(None, *args, **kwargs)
[ "def", "gesture", "(", "self", ",", "start1", ",", "start2", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "to", "(", "obj_self", ",", "end1", ",", "end2", ",", "steps", "=", "100", ")", ":", "ctp", "=", "lambda", "pt", ":", "point", "(", "*", "pt", ")", "if", "type", "(", "pt", ")", "==", "tuple", "else", "pt", "# convert tuple to point", "s1", ",", "s2", ",", "e1", ",", "e2", "=", "ctp", "(", "start1", ")", ",", "ctp", "(", "start2", ")", ",", "ctp", "(", "end1", ")", ",", "ctp", "(", "end2", ")", "return", "self", ".", "jsonrpc", ".", "gesture", "(", "self", ".", "selector", ",", "s1", ",", "s2", ",", "e1", ",", "e2", ",", "steps", ")", "obj", "=", "type", "(", "\"Gesture\"", ",", "(", "object", ",", ")", ",", "{", "\"to\"", ":", "to", "}", ")", "(", ")", "return", "obj", "if", "len", "(", "args", ")", "==", "0", "else", "to", "(", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
52.846154
26.692308
def exclude_current_instance(self, queryset): """ If an instance is being updated, then do not include that instance itself as a uniqueness conflict. """ if self.instance is not None: return queryset.exclude(pk=self.instance.pk) return queryset
[ "def", "exclude_current_instance", "(", "self", ",", "queryset", ")", ":", "if", "self", ".", "instance", "is", "not", "None", ":", "return", "queryset", ".", "exclude", "(", "pk", "=", "self", ".", "instance", ".", "pk", ")", "return", "queryset" ]
37.125
9.375
def to_camel_case(snake_case_string): """ Convert a string from snake case to camel case. For example, "some_var" would become "someVar". :param snake_case_string: Snake-cased string to convert to camel case. :returns: Camel-cased version of snake_case_string. """ parts = snake_case_string.lstrip('_').split('_') return parts[0] + ''.join([i.title() for i in parts[1:]])
[ "def", "to_camel_case", "(", "snake_case_string", ")", ":", "parts", "=", "snake_case_string", ".", "lstrip", "(", "'_'", ")", ".", "split", "(", "'_'", ")", "return", "parts", "[", "0", "]", "+", "''", ".", "join", "(", "[", "i", ".", "title", "(", ")", "for", "i", "in", "parts", "[", "1", ":", "]", "]", ")" ]
43.555556
20.444444
def trigger_event(name, event, value1=None, value2=None, value3=None ): ''' Trigger an event in IFTTT .. code-block:: yaml ifttt-event: ifttt.trigger_event: - event: TestEvent - value1: 'A value that we want to send.' - value2: 'A second value that we want to send.' - value3: 'A third value that we want to send.' The following parameters are required: name The unique name for this event. event The name of the event to trigger in IFTTT. The following parameters are optional: value1 One of the values that we can send to IFTT. value2 One of the values that we can send to IFTT. value3 One of the values that we can send to IFTT. ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if __opts__['test']: ret['comment'] = 'The following trigger would be sent to IFTTT: {0}'.format(event) ret['result'] = None return ret ret['result'] = __salt__['ifttt.trigger_event']( event=event, value1=value1, value2=value2, value3=value3 ) if ret and ret['result']: ret['result'] = True ret['comment'] = 'Triggered Event: {0}'.format(name) else: ret['comment'] = 'Failed to trigger event: {0}'.format(name) return ret
[ "def", "trigger_event", "(", "name", ",", "event", ",", "value1", "=", "None", ",", "value2", "=", "None", ",", "value3", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "False", ",", "'comment'", ":", "''", "}", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'The following trigger would be sent to IFTTT: {0}'", ".", "format", "(", "event", ")", "ret", "[", "'result'", "]", "=", "None", "return", "ret", "ret", "[", "'result'", "]", "=", "__salt__", "[", "'ifttt.trigger_event'", "]", "(", "event", "=", "event", ",", "value1", "=", "value1", ",", "value2", "=", "value2", ",", "value3", "=", "value3", ")", "if", "ret", "and", "ret", "[", "'result'", "]", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Triggered Event: {0}'", ".", "format", "(", "name", ")", "else", ":", "ret", "[", "'comment'", "]", "=", "'Failed to trigger event: {0}'", ".", "format", "(", "name", ")", "return", "ret" ]
23.918033
22.934426
def _poll_task(task_id, server_config, poll_rate=None, timeout=None): """Implement :meth:`nailgun.entities.ForemanTask.poll`. See :meth:`nailgun.entities.ForemanTask.poll` for a full description of how this method acts. Other methods may also call this method, such as :meth:`nailgun.entity_mixins.EntityDeleteMixin.delete`. Certain mixins benefit from being able to poll the server after performing an operation. However, this module cannot use :meth:`nailgun.entities.ForemanTask.poll`, as that would be a circular import. Placing the implementation of :meth:`nailgun.entities.ForemanTask.poll` here allows both that method and the mixins in this module to use the same logic. """ if poll_rate is None: poll_rate = TASK_POLL_RATE if timeout is None: timeout = TASK_TIMEOUT # Implement the timeout. def raise_task_timeout(): # pragma: no cover """Raise a KeyboardInterrupt exception in the main thread.""" thread.interrupt_main() timer = threading.Timer(timeout, raise_task_timeout) # Poll until the task finishes. The timeout prevents an infinite loop. path = '{0}/foreman_tasks/api/tasks/{1}'.format(server_config.url, task_id) try: timer.start() while True: response = client.get(path, **server_config.get_client_kwargs()) response.raise_for_status() task_info = response.json() if task_info['state'] in ('paused', 'stopped'): break time.sleep(poll_rate) except KeyboardInterrupt: # pragma: no cover # raise_task_timeout will raise a KeyboardInterrupt when the timeout # expires. Catch the exception and raise TaskTimedOutError raise TaskTimedOutError( 'Timed out polling task {0}. Task information: {1}' .format(task_id, task_info) ) finally: timer.cancel() # Check for task success or failure. if task_info['result'] != 'success': raise TaskFailedError( 'Task {0} did not succeed. Task information: {1}' .format(task_id, task_info) ) return task_info
[ "def", "_poll_task", "(", "task_id", ",", "server_config", ",", "poll_rate", "=", "None", ",", "timeout", "=", "None", ")", ":", "if", "poll_rate", "is", "None", ":", "poll_rate", "=", "TASK_POLL_RATE", "if", "timeout", "is", "None", ":", "timeout", "=", "TASK_TIMEOUT", "# Implement the timeout.", "def", "raise_task_timeout", "(", ")", ":", "# pragma: no cover", "\"\"\"Raise a KeyboardInterrupt exception in the main thread.\"\"\"", "thread", ".", "interrupt_main", "(", ")", "timer", "=", "threading", ".", "Timer", "(", "timeout", ",", "raise_task_timeout", ")", "# Poll until the task finishes. The timeout prevents an infinite loop.", "path", "=", "'{0}/foreman_tasks/api/tasks/{1}'", ".", "format", "(", "server_config", ".", "url", ",", "task_id", ")", "try", ":", "timer", ".", "start", "(", ")", "while", "True", ":", "response", "=", "client", ".", "get", "(", "path", ",", "*", "*", "server_config", ".", "get_client_kwargs", "(", ")", ")", "response", ".", "raise_for_status", "(", ")", "task_info", "=", "response", ".", "json", "(", ")", "if", "task_info", "[", "'state'", "]", "in", "(", "'paused'", ",", "'stopped'", ")", ":", "break", "time", ".", "sleep", "(", "poll_rate", ")", "except", "KeyboardInterrupt", ":", "# pragma: no cover", "# raise_task_timeout will raise a KeyboardInterrupt when the timeout", "# expires. Catch the exception and raise TaskTimedOutError", "raise", "TaskTimedOutError", "(", "'Timed out polling task {0}. Task information: {1}'", ".", "format", "(", "task_id", ",", "task_info", ")", ")", "finally", ":", "timer", ".", "cancel", "(", ")", "# Check for task success or failure.", "if", "task_info", "[", "'result'", "]", "!=", "'success'", ":", "raise", "TaskFailedError", "(", "'Task {0} did not succeed. Task information: {1}'", ".", "format", "(", "task_id", ",", "task_info", ")", ")", "return", "task_info" ]
38.818182
20.254545
def coalesce(*fields, **kwargs): """ Return a function which accepts a row and returns the first non-missing value from the specified fields. Intended for use with :func:`petl.transform.basics.addfield`. """ missing = kwargs.get('missing', None) default = kwargs.get('default', None) def _coalesce(row): for f in fields: v = row[f] if v is not missing: return v return default return _coalesce
[ "def", "coalesce", "(", "*", "fields", ",", "*", "*", "kwargs", ")", ":", "missing", "=", "kwargs", ".", "get", "(", "'missing'", ",", "None", ")", "default", "=", "kwargs", ".", "get", "(", "'default'", ",", "None", ")", "def", "_coalesce", "(", "row", ")", ":", "for", "f", "in", "fields", ":", "v", "=", "row", "[", "f", "]", "if", "v", "is", "not", "missing", ":", "return", "v", "return", "default", "return", "_coalesce" ]
26.166667
16.611111
def buffer(self, buffer, buffer_format: str, attribute_names, per_instance=False): """ Register a buffer/vbo for the VAO. This can be called multiple times. adding multiple buffers (interleaved or not) Args: buffer: The buffer data. Can be ``numpy.array``, ``moderngl.Buffer`` or ``bytes``. buffer_format (str): The format of the buffer. (eg. ``3f 3f`` for interleaved positions and normals). attribute_names: A list of attribute names this buffer should map to. Keyword Args: per_instance (bool): Is this buffer per instance data for instanced rendering? Returns: The ``moderngl.Buffer`` instance object. This is handy when providing ``bytes`` and ``numpy.array``. """ if not isinstance(attribute_names, list): attribute_names = [attribute_names, ] if not type(buffer) in [moderngl.Buffer, numpy.ndarray, bytes]: raise VAOError( ( "buffer parameter must be a moderngl.Buffer, numpy.ndarray or bytes instance" "(not {})".format(type(buffer)) ) ) if isinstance(buffer, numpy.ndarray): buffer = self.ctx.buffer(buffer.tobytes()) if isinstance(buffer, bytes): buffer = self.ctx.buffer(data=buffer) formats = buffer_format.split() if len(formats) != len(attribute_names): raise VAOError("Format '{}' does not describe attributes {}".format(buffer_format, attribute_names)) self.buffers.append(BufferInfo(buffer, buffer_format, attribute_names, per_instance=per_instance)) self.vertex_count = self.buffers[-1].vertices return buffer
[ "def", "buffer", "(", "self", ",", "buffer", ",", "buffer_format", ":", "str", ",", "attribute_names", ",", "per_instance", "=", "False", ")", ":", "if", "not", "isinstance", "(", "attribute_names", ",", "list", ")", ":", "attribute_names", "=", "[", "attribute_names", ",", "]", "if", "not", "type", "(", "buffer", ")", "in", "[", "moderngl", ".", "Buffer", ",", "numpy", ".", "ndarray", ",", "bytes", "]", ":", "raise", "VAOError", "(", "(", "\"buffer parameter must be a moderngl.Buffer, numpy.ndarray or bytes instance\"", "\"(not {})\"", ".", "format", "(", "type", "(", "buffer", ")", ")", ")", ")", "if", "isinstance", "(", "buffer", ",", "numpy", ".", "ndarray", ")", ":", "buffer", "=", "self", ".", "ctx", ".", "buffer", "(", "buffer", ".", "tobytes", "(", ")", ")", "if", "isinstance", "(", "buffer", ",", "bytes", ")", ":", "buffer", "=", "self", ".", "ctx", ".", "buffer", "(", "data", "=", "buffer", ")", "formats", "=", "buffer_format", ".", "split", "(", ")", "if", "len", "(", "formats", ")", "!=", "len", "(", "attribute_names", ")", ":", "raise", "VAOError", "(", "\"Format '{}' does not describe attributes {}\"", ".", "format", "(", "buffer_format", ",", "attribute_names", ")", ")", "self", ".", "buffers", ".", "append", "(", "BufferInfo", "(", "buffer", ",", "buffer_format", ",", "attribute_names", ",", "per_instance", "=", "per_instance", ")", ")", "self", ".", "vertex_count", "=", "self", ".", "buffers", "[", "-", "1", "]", ".", "vertices", "return", "buffer" ]
42.146341
29.853659
def bond_task( perc_graph_result, seeds, ps, convolution_factors_tasks_iterator ): """ Perform a number of runs The number of runs is the number of seeds convolution_factors_tasks_iterator needs to be an iterator We shield the convolution factors tasks from jug value/result mechanism by supplying an iterator to the list of tasks for lazy evaluation http://github.com/luispedro/jug/blob/43f0d80a78f418fd3aa2b8705eaf7c4a5175fff7/jug/task.py#L100 http://github.com/luispedro/jug/blob/43f0d80a78f418fd3aa2b8705eaf7c4a5175fff7/jug/task.py#L455 """ # restore the list of convolution factors tasks convolution_factors_tasks = list(convolution_factors_tasks_iterator) return reduce( percolate.hpc.bond_reduce, map( bond_run, itertools.repeat(perc_graph_result), seeds, itertools.repeat(ps), itertools.repeat(convolution_factors_tasks), ) )
[ "def", "bond_task", "(", "perc_graph_result", ",", "seeds", ",", "ps", ",", "convolution_factors_tasks_iterator", ")", ":", "# restore the list of convolution factors tasks", "convolution_factors_tasks", "=", "list", "(", "convolution_factors_tasks_iterator", ")", "return", "reduce", "(", "percolate", ".", "hpc", ".", "bond_reduce", ",", "map", "(", "bond_run", ",", "itertools", ".", "repeat", "(", "perc_graph_result", ")", ",", "seeds", ",", "itertools", ".", "repeat", "(", "ps", ")", ",", "itertools", ".", "repeat", "(", "convolution_factors_tasks", ")", ",", ")", ")" ]
32.758621
25.793103
def country_code(self, fmt: Optional[CountryCode] = CountryCode.A2) -> str: """Get a random code of country. Default format is :attr:`~enums.CountryCode.A2` (ISO 3166-1-alpha2), you can change it by passing parameter ``fmt`` with enum object :class:`~enums.CountryCode`. :param fmt: Enum object CountryCode. :return: Country code in selected format. :raises KeyError: if fmt is not supported. """ key = self._validate_enum(fmt, CountryCode) return self.random.choice(COUNTRY_CODES[key])
[ "def", "country_code", "(", "self", ",", "fmt", ":", "Optional", "[", "CountryCode", "]", "=", "CountryCode", ".", "A2", ")", "->", "str", ":", "key", "=", "self", ".", "_validate_enum", "(", "fmt", ",", "CountryCode", ")", "return", "self", ".", "random", ".", "choice", "(", "COUNTRY_CODES", "[", "key", "]", ")" ]
42.769231
17.923077
def _consume_rpc_request(self, arguments, consumer_tag, exclusive, no_ack, no_local, queue): """Create a Consume Frame and execute a RPC request. :param str queue: Queue name :param str consumer_tag: Consumer tag :param bool no_local: Do not deliver own messages :param bool no_ack: No acknowledgement needed :param bool exclusive: Request exclusive access :param dict arguments: Consume key/value arguments :rtype: dict """ consume_frame = specification.Basic.Consume(queue=queue, consumer_tag=consumer_tag, exclusive=exclusive, no_local=no_local, no_ack=no_ack, arguments=arguments) return self._channel.rpc_request(consume_frame)
[ "def", "_consume_rpc_request", "(", "self", ",", "arguments", ",", "consumer_tag", ",", "exclusive", ",", "no_ack", ",", "no_local", ",", "queue", ")", ":", "consume_frame", "=", "specification", ".", "Basic", ".", "Consume", "(", "queue", "=", "queue", ",", "consumer_tag", "=", "consumer_tag", ",", "exclusive", "=", "exclusive", ",", "no_local", "=", "no_local", ",", "no_ack", "=", "no_ack", ",", "arguments", "=", "arguments", ")", "return", "self", ".", "_channel", ".", "rpc_request", "(", "consume_frame", ")" ]
49.6
20.45
def do_enable(): """ Uncomment any lines that start with #import in the .pth file """ try: _lines = [] with open(vext_pth, mode='r') as f: for line in f.readlines(): if line.startswith('#') and line[1:].lstrip().startswith('import '): _lines.append(line[1:].lstrip()) else: _lines.append(line) try: os.unlink('%s.tmp' % vext_pth) except: pass with open('%s.tmp' % vext_pth, mode='w+') as f: f.writelines(_lines) try: os.unlink('%s~' % vext_pth) except: pass os.rename(vext_pth, '%s~' % vext_pth) os.rename('%s.tmp' % vext_pth, vext_pth) except IOError as e: if e.errno == 2: # vext file doesn't exist, recreate it. create_pth()
[ "def", "do_enable", "(", ")", ":", "try", ":", "_lines", "=", "[", "]", "with", "open", "(", "vext_pth", ",", "mode", "=", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "if", "line", ".", "startswith", "(", "'#'", ")", "and", "line", "[", "1", ":", "]", ".", "lstrip", "(", ")", ".", "startswith", "(", "'import '", ")", ":", "_lines", ".", "append", "(", "line", "[", "1", ":", "]", ".", "lstrip", "(", ")", ")", "else", ":", "_lines", ".", "append", "(", "line", ")", "try", ":", "os", ".", "unlink", "(", "'%s.tmp'", "%", "vext_pth", ")", "except", ":", "pass", "with", "open", "(", "'%s.tmp'", "%", "vext_pth", ",", "mode", "=", "'w+'", ")", "as", "f", ":", "f", ".", "writelines", "(", "_lines", ")", "try", ":", "os", ".", "unlink", "(", "'%s~'", "%", "vext_pth", ")", "except", ":", "pass", "os", ".", "rename", "(", "vext_pth", ",", "'%s~'", "%", "vext_pth", ")", "os", ".", "rename", "(", "'%s.tmp'", "%", "vext_pth", ",", "vext_pth", ")", "except", "IOError", "as", "e", ":", "if", "e", ".", "errno", "==", "2", ":", "# vext file doesn't exist, recreate it.", "create_pth", "(", ")" ]
27.125
18.5625
def _is_valid_options_weights_list(value): '''Check whether ``values`` is a valid argument for ``weighted_choice``.''' return ((isinstance(value, list)) and len(value) > 1 and (all(isinstance(opt, tuple) and len(opt) == 2 and isinstance(opt[1], (int, float)) for opt in value)))
[ "def", "_is_valid_options_weights_list", "(", "value", ")", ":", "return", "(", "(", "isinstance", "(", "value", ",", "list", ")", ")", "and", "len", "(", "value", ")", ">", "1", "and", "(", "all", "(", "isinstance", "(", "opt", ",", "tuple", ")", "and", "len", "(", "opt", ")", "==", "2", "and", "isinstance", "(", "opt", "[", "1", "]", ",", "(", "int", ",", "float", ")", ")", "for", "opt", "in", "value", ")", ")", ")" ]
44.25
9.25
def download(self, replace=False): """ Download the dataset from the hosted Yellowbrick data store and save it to the location specified by ``get_data_home``. The downloader verifies the download completed successfully and safely by comparing the expected signature with the SHA 256 signature of the downloaded archive file. Parameters ---------- replace : bool, default: False If the data archive already exists, replace the dataset. If this is False and the dataset exists, an exception is raised. """ download_data( self.url, self.signature, data_home=self.data_home, replace=replace, extract=True )
[ "def", "download", "(", "self", ",", "replace", "=", "False", ")", ":", "download_data", "(", "self", ".", "url", ",", "self", ".", "signature", ",", "data_home", "=", "self", ".", "data_home", ",", "replace", "=", "replace", ",", "extract", "=", "True", ")" ]
40.555556
21.555556
def put(self, file): """ Create a new file on github :param file: File to create :return: File or self.ProxyError """ input_ = { "message": file.logs, "author": file.author.dict(), "content": file.base64, "branch": file.branch } uri = "{api}/repos/{origin}/contents/{path}".format( api=self.github_api_url, origin=self.origin, path=file.path ) data = self.request("PUT", uri, data=input_) if data.status_code == 201: file.pushed = True return file else: decoded_data = json.loads(data.content.decode("utf-8")) return self.ProxyError( data.status_code, (decoded_data, "message"), step="put", context={ "uri": uri, "params": input_ } )
[ "def", "put", "(", "self", ",", "file", ")", ":", "input_", "=", "{", "\"message\"", ":", "file", ".", "logs", ",", "\"author\"", ":", "file", ".", "author", ".", "dict", "(", ")", ",", "\"content\"", ":", "file", ".", "base64", ",", "\"branch\"", ":", "file", ".", "branch", "}", "uri", "=", "\"{api}/repos/{origin}/contents/{path}\"", ".", "format", "(", "api", "=", "self", ".", "github_api_url", ",", "origin", "=", "self", ".", "origin", ",", "path", "=", "file", ".", "path", ")", "data", "=", "self", ".", "request", "(", "\"PUT\"", ",", "uri", ",", "data", "=", "input_", ")", "if", "data", ".", "status_code", "==", "201", ":", "file", ".", "pushed", "=", "True", "return", "file", "else", ":", "decoded_data", "=", "json", ".", "loads", "(", "data", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "return", "self", ".", "ProxyError", "(", "data", ".", "status_code", ",", "(", "decoded_data", ",", "\"message\"", ")", ",", "step", "=", "\"put\"", ",", "context", "=", "{", "\"uri\"", ":", "uri", ",", "\"params\"", ":", "input_", "}", ")" ]
29.83871
14.354839
def generate_match_query(field, value, with_operator_and): """Helper for generating a match query. Args: field (six.text_type): The ES field to be queried. value (six.text_type/bool): The value of the query (bool for the case of type-code query ["core: true"]). with_operator_and (bool): Flag that signifies whether to generate the explicit notation of the query, along with '"operator": "and"', so that all tokens of the query value are required to match. Notes: If value is of instance bool, then the shortened version of the match query is generated, at all times. """ parsed_value = None try: parsed_value = json.loads(value.lower()) except (ValueError, TypeError, AttributeError): # Catch all possible exceptions # we are not interested if they will appear pass if isinstance(value, bool): return {'match': {field: value}} elif isinstance(parsed_value, bool): return {'match': {field: value.lower()}} if with_operator_and: return { 'match': { field: { 'query': value, 'operator': 'and' } } } return {'match': {field: value}}
[ "def", "generate_match_query", "(", "field", ",", "value", ",", "with_operator_and", ")", ":", "parsed_value", "=", "None", "try", ":", "parsed_value", "=", "json", ".", "loads", "(", "value", ".", "lower", "(", ")", ")", "except", "(", "ValueError", ",", "TypeError", ",", "AttributeError", ")", ":", "# Catch all possible exceptions", "# we are not interested if they will appear", "pass", "if", "isinstance", "(", "value", ",", "bool", ")", ":", "return", "{", "'match'", ":", "{", "field", ":", "value", "}", "}", "elif", "isinstance", "(", "parsed_value", ",", "bool", ")", ":", "return", "{", "'match'", ":", "{", "field", ":", "value", ".", "lower", "(", ")", "}", "}", "if", "with_operator_and", ":", "return", "{", "'match'", ":", "{", "field", ":", "{", "'query'", ":", "value", ",", "'operator'", ":", "'and'", "}", "}", "}", "return", "{", "'match'", ":", "{", "field", ":", "value", "}", "}" ]
34.638889
24.027778
def _init_taxids(taxid, taxids): """Return taxid set""" ret = set() if taxids is not None: if taxids is True: return True if isinstance(taxids, int): ret.add(taxids) else: ret.update(taxids) if taxid is not None: ret.add(taxid) if not ret: ret.add(9606) # pylint: disable=superfluous-parens print('**NOTE: DEFAULT TAXID STORED FROM gene2go IS 9606 (human)\n') return ret
[ "def", "_init_taxids", "(", "taxid", ",", "taxids", ")", ":", "ret", "=", "set", "(", ")", "if", "taxids", "is", "not", "None", ":", "if", "taxids", "is", "True", ":", "return", "True", "if", "isinstance", "(", "taxids", ",", "int", ")", ":", "ret", ".", "add", "(", "taxids", ")", "else", ":", "ret", ".", "update", "(", "taxids", ")", "if", "taxid", "is", "not", "None", ":", "ret", ".", "add", "(", "taxid", ")", "if", "not", "ret", ":", "ret", ".", "add", "(", "9606", ")", "# pylint: disable=superfluous-parens", "print", "(", "'**NOTE: DEFAULT TAXID STORED FROM gene2go IS 9606 (human)\\n'", ")", "return", "ret" ]
31.411765
13.647059
def run_idle(self): """Run one of the idle callbacks. Returns: True if one was called, False if no idle callback was called. """ if not self.idlers or self.inactive >= len(self.idlers): return False idler = self.idlers.popleft() callback, args, kwds = idler _logging_debug('idler: %s', callback.__name__) res = callback(*args, **kwds) # See add_idle() for the meaning of the callback return value. if res is not None: if res: self.inactive = 0 else: self.inactive += 1 self.idlers.append(idler) else: _logging_debug('idler %s removed', callback.__name__) return True
[ "def", "run_idle", "(", "self", ")", ":", "if", "not", "self", ".", "idlers", "or", "self", ".", "inactive", ">=", "len", "(", "self", ".", "idlers", ")", ":", "return", "False", "idler", "=", "self", ".", "idlers", ".", "popleft", "(", ")", "callback", ",", "args", ",", "kwds", "=", "idler", "_logging_debug", "(", "'idler: %s'", ",", "callback", ".", "__name__", ")", "res", "=", "callback", "(", "*", "args", ",", "*", "*", "kwds", ")", "# See add_idle() for the meaning of the callback return value.", "if", "res", "is", "not", "None", ":", "if", "res", ":", "self", ".", "inactive", "=", "0", "else", ":", "self", ".", "inactive", "+=", "1", "self", ".", "idlers", ".", "append", "(", "idler", ")", "else", ":", "_logging_debug", "(", "'idler %s removed'", ",", "callback", ".", "__name__", ")", "return", "True" ]
29.363636
18.272727
def execute(self, run): """ This function executes the tool with a sourcefile with options. It also calls functions for output before and after the run. """ self.output_handler.output_before_run(run) benchmark = self.benchmark memlimit = benchmark.rlimits.get(MEMLIMIT) args = run.cmdline() logging.debug('Command line of run is %s', args) run_result = \ self.run_executor.execute_run( args, output_filename=run.log_file, output_dir=run.result_files_folder, result_files_patterns=benchmark.result_files_patterns, hardtimelimit=benchmark.rlimits.get(TIMELIMIT), softtimelimit=benchmark.rlimits.get(SOFTTIMELIMIT), walltimelimit=benchmark.rlimits.get(WALLTIMELIMIT), cores=self.my_cpus, memory_nodes=self.my_memory_nodes, memlimit=memlimit, environments=benchmark.environment(), workingDir=benchmark.working_directory(), maxLogfileSize=benchmark.config.maxLogfileSize, files_count_limit=benchmark.config.filesCountLimit, files_size_limit=benchmark.config.filesSizeLimit) if self.run_executor.PROCESS_KILLED: # If the run was interrupted, we ignore the result and cleanup. try: if benchmark.config.debug: os.rename(run.log_file, run.log_file + ".killed") else: os.remove(run.log_file) except OSError: pass return 1 if self.my_cpus: run_result['cpuCores'] = self.my_cpus if self.my_memory_nodes: run_result['memoryNodes'] = self.my_memory_nodes run.set_result(run_result) self.output_handler.output_after_run(run)
[ "def", "execute", "(", "self", ",", "run", ")", ":", "self", ".", "output_handler", ".", "output_before_run", "(", "run", ")", "benchmark", "=", "self", ".", "benchmark", "memlimit", "=", "benchmark", ".", "rlimits", ".", "get", "(", "MEMLIMIT", ")", "args", "=", "run", ".", "cmdline", "(", ")", "logging", ".", "debug", "(", "'Command line of run is %s'", ",", "args", ")", "run_result", "=", "self", ".", "run_executor", ".", "execute_run", "(", "args", ",", "output_filename", "=", "run", ".", "log_file", ",", "output_dir", "=", "run", ".", "result_files_folder", ",", "result_files_patterns", "=", "benchmark", ".", "result_files_patterns", ",", "hardtimelimit", "=", "benchmark", ".", "rlimits", ".", "get", "(", "TIMELIMIT", ")", ",", "softtimelimit", "=", "benchmark", ".", "rlimits", ".", "get", "(", "SOFTTIMELIMIT", ")", ",", "walltimelimit", "=", "benchmark", ".", "rlimits", ".", "get", "(", "WALLTIMELIMIT", ")", ",", "cores", "=", "self", ".", "my_cpus", ",", "memory_nodes", "=", "self", ".", "my_memory_nodes", ",", "memlimit", "=", "memlimit", ",", "environments", "=", "benchmark", ".", "environment", "(", ")", ",", "workingDir", "=", "benchmark", ".", "working_directory", "(", ")", ",", "maxLogfileSize", "=", "benchmark", ".", "config", ".", "maxLogfileSize", ",", "files_count_limit", "=", "benchmark", ".", "config", ".", "filesCountLimit", ",", "files_size_limit", "=", "benchmark", ".", "config", ".", "filesSizeLimit", ")", "if", "self", ".", "run_executor", ".", "PROCESS_KILLED", ":", "# If the run was interrupted, we ignore the result and cleanup.", "try", ":", "if", "benchmark", ".", "config", ".", "debug", ":", "os", ".", "rename", "(", "run", ".", "log_file", ",", "run", ".", "log_file", "+", "\".killed\"", ")", "else", ":", "os", ".", "remove", "(", "run", ".", "log_file", ")", "except", "OSError", ":", "pass", "return", "1", "if", "self", ".", "my_cpus", ":", "run_result", "[", "'cpuCores'", "]", "=", "self", ".", "my_cpus", "if", "self", ".", 
"my_memory_nodes", ":", "run_result", "[", "'memoryNodes'", "]", "=", "self", ".", "my_memory_nodes", "run", ".", "set_result", "(", "run_result", ")", "self", ".", "output_handler", ".", "output_after_run", "(", "run", ")" ]
39.5625
17.8125
def _send_ffe(self, pid, app_id, app_flags, fr): """Send a flood-fill end packet. The cores and regions that the application should be loaded to will have been specified by a stream of flood-fill core select packets (FFCS). """ arg1 = (NNCommands.flood_fill_end << 24) | pid arg2 = (app_id << 24) | (app_flags << 18) self._send_scp(255, 255, 0, SCPCommands.nearest_neighbour_packet, arg1, arg2, fr)
[ "def", "_send_ffe", "(", "self", ",", "pid", ",", "app_id", ",", "app_flags", ",", "fr", ")", ":", "arg1", "=", "(", "NNCommands", ".", "flood_fill_end", "<<", "24", ")", "|", "pid", "arg2", "=", "(", "app_id", "<<", "24", ")", "|", "(", "app_flags", "<<", "18", ")", "self", ".", "_send_scp", "(", "255", ",", "255", ",", "0", ",", "SCPCommands", ".", "nearest_neighbour_packet", ",", "arg1", ",", "arg2", ",", "fr", ")" ]
43.272727
18.090909
def _forward_outbound(self, channel): """ Forward outbound traffic (ssh -> websockets) """ try: while True: wait_read(channel.fileno()) data = channel.recv(1024) if not len(data): return self._websocket.send(json.dumps({'data': data})) finally: self.close()
[ "def", "_forward_outbound", "(", "self", ",", "channel", ")", ":", "try", ":", "while", "True", ":", "wait_read", "(", "channel", ".", "fileno", "(", ")", ")", "data", "=", "channel", ".", "recv", "(", "1024", ")", "if", "not", "len", "(", "data", ")", ":", "return", "self", ".", "_websocket", ".", "send", "(", "json", ".", "dumps", "(", "{", "'data'", ":", "data", "}", ")", ")", "finally", ":", "self", ".", "close", "(", ")" ]
34.454545
12.454545
def get_tornado_apps(context, debug=False): """ Create Tornado's application for all interfaces which are defined in the configuration. *context* is instance of the :class:`shelter.core.context.Context`. If *debug* is :const:`True`, server will be run in **DEBUG** mode. Return :class:`list` of the :class:`tornado.web.Application` instances. """ if context.config.app_settings_handler: app_settings_handler = import_object( context.config.app_settings_handler) settings = app_settings_handler(context) else: settings = {} apps = [] for interface in context.config.interfaces: urls = interface.urls if not urls: urls = [tornado.web.URLSpec('/', NullHandler)] apps.append( tornado.web.Application( urls, debug=debug, context=context, interface=interface, **settings ) ) return apps
[ "def", "get_tornado_apps", "(", "context", ",", "debug", "=", "False", ")", ":", "if", "context", ".", "config", ".", "app_settings_handler", ":", "app_settings_handler", "=", "import_object", "(", "context", ".", "config", ".", "app_settings_handler", ")", "settings", "=", "app_settings_handler", "(", "context", ")", "else", ":", "settings", "=", "{", "}", "apps", "=", "[", "]", "for", "interface", "in", "context", ".", "config", ".", "interfaces", ":", "urls", "=", "interface", ".", "urls", "if", "not", "urls", ":", "urls", "=", "[", "tornado", ".", "web", ".", "URLSpec", "(", "'/'", ",", "NullHandler", ")", "]", "apps", ".", "append", "(", "tornado", ".", "web", ".", "Application", "(", "urls", ",", "debug", "=", "debug", ",", "context", "=", "context", ",", "interface", "=", "interface", ",", "*", "*", "settings", ")", ")", "return", "apps" ]
34.777778
16.111111
def launchQueryForMode(self, query=None, mode=None): """ Method that launches an i3Browser to collect data. Args: ----- query: The query to be performed mode: The mode to be used to build the query. Return: ------- A string containing the recovered data or None. """ # Creating the query URL for that mode qURL = self.createURL(word=query, mode=mode) i3Browser = browser.Browser() try: # Check if it needs creds if self.needsCredentials[mode]: self._getAuthenticated(i3Browser, qURL) data = i3Browser.recoverURL(qURL) else: # Accessing the resources data = i3Browser.recoverURL(qURL) return data except KeyError: print(general.error("[*] '{}' is not a valid mode for this wrapper ({}).".format(mode, self.__class__.__name__))) return None
[ "def", "launchQueryForMode", "(", "self", ",", "query", "=", "None", ",", "mode", "=", "None", ")", ":", "# Creating the query URL for that mode", "qURL", "=", "self", ".", "createURL", "(", "word", "=", "query", ",", "mode", "=", "mode", ")", "i3Browser", "=", "browser", ".", "Browser", "(", ")", "try", ":", "# Check if it needs creds", "if", "self", ".", "needsCredentials", "[", "mode", "]", ":", "self", ".", "_getAuthenticated", "(", "i3Browser", ",", "qURL", ")", "data", "=", "i3Browser", ".", "recoverURL", "(", "qURL", ")", "else", ":", "# Accessing the resources", "data", "=", "i3Browser", ".", "recoverURL", "(", "qURL", ")", "return", "data", "except", "KeyError", ":", "print", "(", "general", ".", "error", "(", "\"[*] '{}' is not a valid mode for this wrapper ({}).\"", ".", "format", "(", "mode", ",", "self", ".", "__class__", ".", "__name__", ")", ")", ")", "return", "None" ]
32.566667
19.5
def combination_step(self): """Update auxiliary state by a smart combination of previous updates in the frequency domain (standard FISTA :cite:`beck-2009-fast`). """ # Update t step tprv = self.t self.t = 0.5 * float(1. + np.sqrt(1. + 4. * tprv**2)) # Update Y if not self.opt['FastSolve']: self.Yfprv = self.Yf.copy() self.Yf = self.Xf + ((tprv - 1.) / self.t) * (self.Xf - self.Xfprv)
[ "def", "combination_step", "(", "self", ")", ":", "# Update t step", "tprv", "=", "self", ".", "t", "self", ".", "t", "=", "0.5", "*", "float", "(", "1.", "+", "np", ".", "sqrt", "(", "1.", "+", "4.", "*", "tprv", "**", "2", ")", ")", "# Update Y", "if", "not", "self", ".", "opt", "[", "'FastSolve'", "]", ":", "self", ".", "Yfprv", "=", "self", ".", "Yf", ".", "copy", "(", ")", "self", ".", "Yf", "=", "self", ".", "Xf", "+", "(", "(", "tprv", "-", "1.", ")", "/", "self", ".", "t", ")", "*", "(", "self", ".", "Xf", "-", "self", ".", "Xfprv", ")" ]
33.357143
16.714286
def run(self, plugin_manager=None): """Run the haas test runner. This will load and configure the selected plugins, set up the environment and begin test discovery, loading and running. Parameters ---------- plugin_manager : haas.plugin_manager.PluginManager [Optional] Override the use of the default plugin manager. """ if plugin_manager is None: plugin_manager = PluginManager() plugin_manager.add_plugin_arguments(self.parser) args = self.parser.parse_args(self.argv[1:]) environment_plugins = plugin_manager.get_enabled_hook_plugins( plugin_manager.ENVIRONMENT_HOOK, args) runner = plugin_manager.get_driver( plugin_manager.TEST_RUNNER, args) with PluginContext(environment_plugins): loader = Loader() discoverer = plugin_manager.get_driver( plugin_manager.TEST_DISCOVERY, args, loader=loader) suites = [ discoverer.discover( start=start, top_level_directory=args.top_level_directory, pattern=args.pattern, ) for start in args.start ] if len(suites) == 1: suite = suites[0] else: suite = loader.create_suite(suites) test_count = suite.countTestCases() result_handlers = plugin_manager.get_enabled_hook_plugins( plugin_manager.RESULT_HANDLERS, args, test_count=test_count) result_collector = ResultCollector( buffer=args.buffer, failfast=args.failfast) for result_handler in result_handlers: result_collector.add_result_handler(result_handler) result = runner.run(result_collector, suite) return not result.wasSuccessful()
[ "def", "run", "(", "self", ",", "plugin_manager", "=", "None", ")", ":", "if", "plugin_manager", "is", "None", ":", "plugin_manager", "=", "PluginManager", "(", ")", "plugin_manager", ".", "add_plugin_arguments", "(", "self", ".", "parser", ")", "args", "=", "self", ".", "parser", ".", "parse_args", "(", "self", ".", "argv", "[", "1", ":", "]", ")", "environment_plugins", "=", "plugin_manager", ".", "get_enabled_hook_plugins", "(", "plugin_manager", ".", "ENVIRONMENT_HOOK", ",", "args", ")", "runner", "=", "plugin_manager", ".", "get_driver", "(", "plugin_manager", ".", "TEST_RUNNER", ",", "args", ")", "with", "PluginContext", "(", "environment_plugins", ")", ":", "loader", "=", "Loader", "(", ")", "discoverer", "=", "plugin_manager", ".", "get_driver", "(", "plugin_manager", ".", "TEST_DISCOVERY", ",", "args", ",", "loader", "=", "loader", ")", "suites", "=", "[", "discoverer", ".", "discover", "(", "start", "=", "start", ",", "top_level_directory", "=", "args", ".", "top_level_directory", ",", "pattern", "=", "args", ".", "pattern", ",", ")", "for", "start", "in", "args", ".", "start", "]", "if", "len", "(", "suites", ")", "==", "1", ":", "suite", "=", "suites", "[", "0", "]", "else", ":", "suite", "=", "loader", ".", "create_suite", "(", "suites", ")", "test_count", "=", "suite", ".", "countTestCases", "(", ")", "result_handlers", "=", "plugin_manager", ".", "get_enabled_hook_plugins", "(", "plugin_manager", ".", "RESULT_HANDLERS", ",", "args", ",", "test_count", "=", "test_count", ")", "result_collector", "=", "ResultCollector", "(", "buffer", "=", "args", ".", "buffer", ",", "failfast", "=", "args", ".", "failfast", ")", "for", "result_handler", "in", "result_handlers", ":", "result_collector", ".", "add_result_handler", "(", "result_handler", ")", "result", "=", "runner", ".", "run", "(", "result_collector", ",", "suite", ")", "return", "not", "result", ".", "wasSuccessful", "(", ")" ]
36.960784
18.980392
def parseFloat(self, words): """Convert a floating-point number described in words to a double. Supports two kinds of descriptions: those with a 'point' (e.g., "one point two five") and those with a fraction (e.g., "one and a quarter"). Args: words (str): Description of the floating-point number. Returns: A double representation of the words. """ def pointFloat(words): m = re.search(r'(.*) point (.*)', words) if m: whole = m.group(1) frac = m.group(2) total = 0.0 coeff = 0.10 for digit in frac.split(' '): total += coeff * self.parse(digit) coeff /= 10.0 return self.parseInt(whole) + total return None def fractionFloat(words): m = re.search(r'(.*) and (.*)', words) if m: whole = self.parseInt(m.group(1)) frac = m.group(2) # Replace plurals frac = re.sub(r'(\w+)s(\b)', '\g<1>\g<2>', frac) # Convert 'a' to 'one' (e.g., 'a third' to 'one third') frac = re.sub(r'(\b)a(\b)', '\g<1>one\g<2>', frac) split = frac.split(' ') # Split fraction into num (regular integer), denom (ordinal) num = split[:1] denom = split[1:] while denom: try: # Test for valid num, denom num_value = self.parse(' '.join(num)) denom_value = self.parse(' '.join(denom)) return whole + float(num_value) / denom_value except: # Add another word to num num += denom[:1] denom = denom[1:] return None # Extract "one point two five"-type float result = pointFloat(words) if result: return result # Extract "one and a quarter"-type float result = fractionFloat(words) if result: return result # Parse as integer return self.parseInt(words)
[ "def", "parseFloat", "(", "self", ",", "words", ")", ":", "def", "pointFloat", "(", "words", ")", ":", "m", "=", "re", ".", "search", "(", "r'(.*) point (.*)'", ",", "words", ")", "if", "m", ":", "whole", "=", "m", ".", "group", "(", "1", ")", "frac", "=", "m", ".", "group", "(", "2", ")", "total", "=", "0.0", "coeff", "=", "0.10", "for", "digit", "in", "frac", ".", "split", "(", "' '", ")", ":", "total", "+=", "coeff", "*", "self", ".", "parse", "(", "digit", ")", "coeff", "/=", "10.0", "return", "self", ".", "parseInt", "(", "whole", ")", "+", "total", "return", "None", "def", "fractionFloat", "(", "words", ")", ":", "m", "=", "re", ".", "search", "(", "r'(.*) and (.*)'", ",", "words", ")", "if", "m", ":", "whole", "=", "self", ".", "parseInt", "(", "m", ".", "group", "(", "1", ")", ")", "frac", "=", "m", ".", "group", "(", "2", ")", "# Replace plurals", "frac", "=", "re", ".", "sub", "(", "r'(\\w+)s(\\b)'", ",", "'\\g<1>\\g<2>'", ",", "frac", ")", "# Convert 'a' to 'one' (e.g., 'a third' to 'one third')", "frac", "=", "re", ".", "sub", "(", "r'(\\b)a(\\b)'", ",", "'\\g<1>one\\g<2>'", ",", "frac", ")", "split", "=", "frac", ".", "split", "(", "' '", ")", "# Split fraction into num (regular integer), denom (ordinal)", "num", "=", "split", "[", ":", "1", "]", "denom", "=", "split", "[", "1", ":", "]", "while", "denom", ":", "try", ":", "# Test for valid num, denom", "num_value", "=", "self", ".", "parse", "(", "' '", ".", "join", "(", "num", ")", ")", "denom_value", "=", "self", ".", "parse", "(", "' '", ".", "join", "(", "denom", ")", ")", "return", "whole", "+", "float", "(", "num_value", ")", "/", "denom_value", "except", ":", "# Add another word to num", "num", "+=", "denom", "[", ":", "1", "]", "denom", "=", "denom", "[", "1", ":", "]", "return", "None", "# Extract \"one point two five\"-type float", "result", "=", "pointFloat", "(", "words", ")", "if", "result", ":", "return", "result", "# Extract \"one and a quarter\"-type float", "result", "=", 
"fractionFloat", "(", "words", ")", "if", "result", ":", "return", "result", "# Parse as integer", "return", "self", ".", "parseInt", "(", "words", ")" ]
32.376812
18.942029
def _rc_rpoplpush(self, src, dst): """ RPOP a value off of the ``src`` list and LPUSH it on to the ``dst`` list. Returns the value. """ rpop = self.rpop(src) if rpop is not None: self.lpush(dst, rpop) return rpop return None
[ "def", "_rc_rpoplpush", "(", "self", ",", "src", ",", "dst", ")", ":", "rpop", "=", "self", ".", "rpop", "(", "src", ")", "if", "rpop", "is", "not", "None", ":", "self", ".", "lpush", "(", "dst", ",", "rpop", ")", "return", "rpop", "return", "None" ]
29.6
10.2
def log(wave): r""" Return the natural logarithm of a waveform's dependent variable vector. :param wave: Waveform :type wave: :py:class:`peng.eng.Waveform` :rtype: :py:class:`peng.eng.Waveform` .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]] .. Auto-generated exceptions documentation for peng.wave_functions.log :raises: * RuntimeError (Argument \`wave\` is not valid) * ValueError (Math domain error) .. [[[end]]] """ pexdoc.exh.addex( ValueError, "Math domain error", bool((min(wave._dep_vector) <= 0)) ) return _operation(wave, "log", "", np.log)
[ "def", "log", "(", "wave", ")", ":", "pexdoc", ".", "exh", ".", "addex", "(", "ValueError", ",", "\"Math domain error\"", ",", "bool", "(", "(", "min", "(", "wave", ".", "_dep_vector", ")", "<=", "0", ")", ")", ")", "return", "_operation", "(", "wave", ",", "\"log\"", ",", "\"\"", ",", "np", ".", "log", ")" ]
26.565217
23.304348
def _copy_from(self, rhs): """Copy all data from rhs into this instance, handles usage count""" self._manager = rhs._manager self._rlist = type(rhs._rlist)(rhs._rlist) self._region = rhs._region self._ofs = rhs._ofs self._size = rhs._size for region in self._rlist: region.increment_client_count() if self._region is not None: self._region.increment_client_count()
[ "def", "_copy_from", "(", "self", ",", "rhs", ")", ":", "self", ".", "_manager", "=", "rhs", ".", "_manager", "self", ".", "_rlist", "=", "type", "(", "rhs", ".", "_rlist", ")", "(", "rhs", ".", "_rlist", ")", "self", ".", "_region", "=", "rhs", ".", "_region", "self", ".", "_ofs", "=", "rhs", ".", "_ofs", "self", ".", "_size", "=", "rhs", ".", "_size", "for", "region", "in", "self", ".", "_rlist", ":", "region", ".", "increment_client_count", "(", ")", "if", "self", ".", "_region", "is", "not", "None", ":", "self", ".", "_region", ".", "increment_client_count", "(", ")" ]
34
12.153846
def auth(self, request): """ let's auth the user to the Service :param request: request object :return: callback url :rtype: string that contains the url to redirect after auth """ service = UserService.objects.get(user=request.user, name='ServiceWallabag') callback_url = '%s://%s%s' % (request.scheme, request.get_host(), reverse('wallabag_callback')) params = {'username': service.username, 'password': service.password, 'client_id': service.client_id, 'client_secret': service.client_secret} access_token = Wall.get_token(host=service.host, **params) request.session['oauth_token'] = access_token return callback_url
[ "def", "auth", "(", "self", ",", "request", ")", ":", "service", "=", "UserService", ".", "objects", ".", "get", "(", "user", "=", "request", ".", "user", ",", "name", "=", "'ServiceWallabag'", ")", "callback_url", "=", "'%s://%s%s'", "%", "(", "request", ".", "scheme", ",", "request", ".", "get_host", "(", ")", ",", "reverse", "(", "'wallabag_callback'", ")", ")", "params", "=", "{", "'username'", ":", "service", ".", "username", ",", "'password'", ":", "service", ".", "password", ",", "'client_id'", ":", "service", ".", "client_id", ",", "'client_secret'", ":", "service", ".", "client_secret", "}", "access_token", "=", "Wall", ".", "get_token", "(", "host", "=", "service", ".", "host", ",", "*", "*", "params", ")", "request", ".", "session", "[", "'oauth_token'", "]", "=", "access_token", "return", "callback_url" ]
45.352941
17.705882
def _next_iter_line(self, row_num): """ Wrapper around iterating through `self.data` (CSV source). When a CSV error is raised, we check for specific error messages that allow us to customize the error message displayed to the user. Parameters ---------- row_num : The row number of the line being parsed. """ try: return next(self.data) except csv.Error as e: if self.warn_bad_lines or self.error_bad_lines: msg = str(e) if 'NULL byte' in msg: msg = ('NULL byte detected. This byte ' 'cannot be processed in Python\'s ' 'native csv library at the moment, ' 'so please pass in engine=\'c\' instead') if self.skipfooter > 0: reason = ('Error could possibly be due to ' 'parsing errors in the skipped footer rows ' '(the skipfooter keyword is only applied ' 'after Python\'s csv library has parsed ' 'all rows).') msg += '. ' + reason self._alert_malformed(msg, row_num) return None
[ "def", "_next_iter_line", "(", "self", ",", "row_num", ")", ":", "try", ":", "return", "next", "(", "self", ".", "data", ")", "except", "csv", ".", "Error", "as", "e", ":", "if", "self", ".", "warn_bad_lines", "or", "self", ".", "error_bad_lines", ":", "msg", "=", "str", "(", "e", ")", "if", "'NULL byte'", "in", "msg", ":", "msg", "=", "(", "'NULL byte detected. This byte '", "'cannot be processed in Python\\'s '", "'native csv library at the moment, '", "'so please pass in engine=\\'c\\' instead'", ")", "if", "self", ".", "skipfooter", ">", "0", ":", "reason", "=", "(", "'Error could possibly be due to '", "'parsing errors in the skipped footer rows '", "'(the skipfooter keyword is only applied '", "'after Python\\'s csv library has parsed '", "'all rows).'", ")", "msg", "+=", "'. '", "+", "reason", "self", ".", "_alert_malformed", "(", "msg", ",", "row_num", ")", "return", "None" ]
37.142857
19.657143
def autoargs(include=None, # type: Union[str, Tuple[str]] exclude=None, # type: Union[str, Tuple[str]] f=DECORATED ): """ Defines a decorator with parameters, to automatically assign the inputs of a function to self PRIOR to executing the function. In other words: ``` @autoargs def myfunc(a): print('hello') ``` will create the equivalent of ``` def myfunc(a): self.a = a print('hello') ``` Initial code from http://stackoverflow.com/questions/3652851/what-is-the-best-way-to-do-automatic-attribute-assignment-in-python-and-is-it-a#answer-3653049 :param include: a tuple of attribute names to include in the auto-assignment. If None, all arguments will be included by default :param exclude: a tuple of attribute names to exclude from the auto-assignment. In such case, include should be None :return: """ return autoargs_decorate(f, include=include, exclude=exclude)
[ "def", "autoargs", "(", "include", "=", "None", ",", "# type: Union[str, Tuple[str]]", "exclude", "=", "None", ",", "# type: Union[str, Tuple[str]]", "f", "=", "DECORATED", ")", ":", "return", "autoargs_decorate", "(", "f", ",", "include", "=", "include", ",", "exclude", "=", "exclude", ")" ]
33.4
31.666667
def ipa_chars(self, value): """ Set the list of IPAChar objects composing the IPA string :param list value: list of IPAChar objects """ if value is None: self.__ipa_chars = [] else: if is_list_of_ipachars(value): self.__ipa_chars = value else: raise TypeError("ipa_chars only accepts a list of IPAChar objects")
[ "def", "ipa_chars", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", ":", "self", ".", "__ipa_chars", "=", "[", "]", "else", ":", "if", "is_list_of_ipachars", "(", "value", ")", ":", "self", ".", "__ipa_chars", "=", "value", "else", ":", "raise", "TypeError", "(", "\"ipa_chars only accepts a list of IPAChar objects\"", ")" ]
32
15.692308
def _value_format(self, value): """Format value for dual value display.""" return super(VerticalPyramid, self)._value_format(value and abs(value))
[ "def", "_value_format", "(", "self", ",", "value", ")", ":", "return", "super", "(", "VerticalPyramid", ",", "self", ")", ".", "_value_format", "(", "value", "and", "abs", "(", "value", ")", ")" ]
53.333333
16
def getheaders(self, name): """Get all values for a header. This returns a list of values for headers given more than once; each value in the result list is stripped in the same way as the result of getheader(). If the header is not given, return an empty list. """ result = [] current = '' have_header = 0 for s in self.getallmatchingheaders(name): if s[0].isspace(): if current: current = "%s\n %s" % (current, s.strip()) else: current = s.strip() else: if have_header: result.append(current) current = s[s.find(":") + 1:].strip() have_header = 1 if have_header: result.append(current) return result
[ "def", "getheaders", "(", "self", ",", "name", ")", ":", "result", "=", "[", "]", "current", "=", "''", "have_header", "=", "0", "for", "s", "in", "self", ".", "getallmatchingheaders", "(", "name", ")", ":", "if", "s", "[", "0", "]", ".", "isspace", "(", ")", ":", "if", "current", ":", "current", "=", "\"%s\\n %s\"", "%", "(", "current", ",", "s", ".", "strip", "(", ")", ")", "else", ":", "current", "=", "s", ".", "strip", "(", ")", "else", ":", "if", "have_header", ":", "result", ".", "append", "(", "current", ")", "current", "=", "s", "[", "s", ".", "find", "(", "\":\"", ")", "+", "1", ":", "]", ".", "strip", "(", ")", "have_header", "=", "1", "if", "have_header", ":", "result", ".", "append", "(", "current", ")", "return", "result" ]
35.166667
16.166667
async def read(self) -> bytes: """Read request body if present. Returns bytes object with full request content. """ if self._read_bytes is None: body = bytearray() while True: chunk = await self._payload.readany() body.extend(chunk) if self._client_max_size: body_size = len(body) if body_size >= self._client_max_size: raise HTTPRequestEntityTooLarge( max_size=self._client_max_size, actual_size=body_size ) if not chunk: break self._read_bytes = bytes(body) return self._read_bytes
[ "async", "def", "read", "(", "self", ")", "->", "bytes", ":", "if", "self", ".", "_read_bytes", "is", "None", ":", "body", "=", "bytearray", "(", ")", "while", "True", ":", "chunk", "=", "await", "self", ".", "_payload", ".", "readany", "(", ")", "body", ".", "extend", "(", "chunk", ")", "if", "self", ".", "_client_max_size", ":", "body_size", "=", "len", "(", "body", ")", "if", "body_size", ">=", "self", ".", "_client_max_size", ":", "raise", "HTTPRequestEntityTooLarge", "(", "max_size", "=", "self", ".", "_client_max_size", ",", "actual_size", "=", "body_size", ")", "if", "not", "chunk", ":", "break", "self", ".", "_read_bytes", "=", "bytes", "(", "body", ")", "return", "self", ".", "_read_bytes" ]
36.571429
11
def get_representative_json(file_input=None, formatted=False, annotate_is_json=False, sampling_substitution_regex=('(.+)', '\\1_sample'), do_not_sample=('sqlite_stat1', ), sampling_limits=None): """ :param None|str file_input: :param bool formatted: :param bool annotate_is_json: :param tuple sampling_substitution_regex: to shorten string by one, try ('(.+).{1}', '\\1') or ('(.+)s', '\\1') :param list|tuple do_not_sample: :param None|dict sampling_limits: :return: """ if file_input is None: file_input = get_collection_path() source = file_input if sampling_limits is None: sampling_limits = { 'notes': 10, 'cards': 10 } if os.path.splitext(file_input)[1] == '.apkg': from AnkiTools.convert import anki_convert tempdir = mkdtemp() temp_anki2 = os.path.join(tempdir, 'temp.anki2') anki_convert(file_input, out_file=temp_anki2) file_input = temp_anki2 output_json = OrderedDict( _meta=OrderedDict( generated=datetime.fromtimestamp(datetime.now().timestamp()).isoformat(), source=os.path.abspath(source), data=OrderedDict() ) ) with sqlite3.connect(file_input) as conn: cursor = conn.execute("SELECT name FROM sqlite_master WHERE type='table';") for row in cursor: table_name = row[0] key = table_name output = list(read_anki_table(conn, table_name)) if table_name not in output_json['_meta']['data'].keys(): output_json['_meta']['data'][table_name] = OrderedDict() output_json['_meta']['data'][table_name]['number_of_entries'] = len(output) if len(output) >= 1: if len(output) > 1: if table_name in do_not_sample: output_json[key] = output else: re_match, re_replace = sampling_substitution_regex key = re.sub(re_match, re_replace, key) output_json[key] = random.sample(output, sampling_limits.get(table_name, 10)) else: output_json[key] = output[0] if formatted: to_format = output_json[key] if isinstance(output_json[key], (dict, OrderedDict)): _format_representative_json(to_format, annotate_is_json) else: for item in to_format: _format_representative_json(item, annotate_is_json) else: 
output_json[key] = None return output_json
[ "def", "get_representative_json", "(", "file_input", "=", "None", ",", "formatted", "=", "False", ",", "annotate_is_json", "=", "False", ",", "sampling_substitution_regex", "=", "(", "'(.+)'", ",", "'\\\\1_sample'", ")", ",", "do_not_sample", "=", "(", "'sqlite_stat1'", ",", ")", ",", "sampling_limits", "=", "None", ")", ":", "if", "file_input", "is", "None", ":", "file_input", "=", "get_collection_path", "(", ")", "source", "=", "file_input", "if", "sampling_limits", "is", "None", ":", "sampling_limits", "=", "{", "'notes'", ":", "10", ",", "'cards'", ":", "10", "}", "if", "os", ".", "path", ".", "splitext", "(", "file_input", ")", "[", "1", "]", "==", "'.apkg'", ":", "from", "AnkiTools", ".", "convert", "import", "anki_convert", "tempdir", "=", "mkdtemp", "(", ")", "temp_anki2", "=", "os", ".", "path", ".", "join", "(", "tempdir", ",", "'temp.anki2'", ")", "anki_convert", "(", "file_input", ",", "out_file", "=", "temp_anki2", ")", "file_input", "=", "temp_anki2", "output_json", "=", "OrderedDict", "(", "_meta", "=", "OrderedDict", "(", "generated", "=", "datetime", ".", "fromtimestamp", "(", "datetime", ".", "now", "(", ")", ".", "timestamp", "(", ")", ")", ".", "isoformat", "(", ")", ",", "source", "=", "os", ".", "path", ".", "abspath", "(", "source", ")", ",", "data", "=", "OrderedDict", "(", ")", ")", ")", "with", "sqlite3", ".", "connect", "(", "file_input", ")", "as", "conn", ":", "cursor", "=", "conn", ".", "execute", "(", "\"SELECT name FROM sqlite_master WHERE type='table';\"", ")", "for", "row", "in", "cursor", ":", "table_name", "=", "row", "[", "0", "]", "key", "=", "table_name", "output", "=", "list", "(", "read_anki_table", "(", "conn", ",", "table_name", ")", ")", "if", "table_name", "not", "in", "output_json", "[", "'_meta'", "]", "[", "'data'", "]", ".", "keys", "(", ")", ":", "output_json", "[", "'_meta'", "]", "[", "'data'", "]", "[", "table_name", "]", "=", "OrderedDict", "(", ")", "output_json", "[", "'_meta'", "]", "[", "'data'", "]", "[", 
"table_name", "]", "[", "'number_of_entries'", "]", "=", "len", "(", "output", ")", "if", "len", "(", "output", ")", ">=", "1", ":", "if", "len", "(", "output", ")", ">", "1", ":", "if", "table_name", "in", "do_not_sample", ":", "output_json", "[", "key", "]", "=", "output", "else", ":", "re_match", ",", "re_replace", "=", "sampling_substitution_regex", "key", "=", "re", ".", "sub", "(", "re_match", ",", "re_replace", ",", "key", ")", "output_json", "[", "key", "]", "=", "random", ".", "sample", "(", "output", ",", "sampling_limits", ".", "get", "(", "table_name", ",", "10", ")", ")", "else", ":", "output_json", "[", "key", "]", "=", "output", "[", "0", "]", "if", "formatted", ":", "to_format", "=", "output_json", "[", "key", "]", "if", "isinstance", "(", "output_json", "[", "key", "]", ",", "(", "dict", ",", "OrderedDict", ")", ")", ":", "_format_representative_json", "(", "to_format", ",", "annotate_is_json", ")", "else", ":", "for", "item", "in", "to_format", ":", "_format_representative_json", "(", "item", ",", "annotate_is_json", ")", "else", ":", "output_json", "[", "key", "]", "=", "None", "return", "output_json" ]
36.368421
21.815789
def register_plugins(): """find any installed plugins and register them.""" if pkg_resources: # pragma: no cover for ep in pkg_resources.iter_entry_points('slam_plugins'): plugin = ep.load() # add any init options to the main init command if hasattr(plugin, 'init') and hasattr(plugin.init, '_arguments'): for arg in plugin.init._arguments: init.parser.add_argument(*arg[0], **arg[1]) init._arguments += plugin.init._arguments init._argnames += plugin.init._argnames plugins[ep.name] = plugin
[ "def", "register_plugins", "(", ")", ":", "if", "pkg_resources", ":", "# pragma: no cover", "for", "ep", "in", "pkg_resources", ".", "iter_entry_points", "(", "'slam_plugins'", ")", ":", "plugin", "=", "ep", ".", "load", "(", ")", "# add any init options to the main init command", "if", "hasattr", "(", "plugin", ",", "'init'", ")", "and", "hasattr", "(", "plugin", ".", "init", ",", "'_arguments'", ")", ":", "for", "arg", "in", "plugin", ".", "init", ".", "_arguments", ":", "init", ".", "parser", ".", "add_argument", "(", "*", "arg", "[", "0", "]", ",", "*", "*", "arg", "[", "1", "]", ")", "init", ".", "_arguments", "+=", "plugin", ".", "init", ".", "_arguments", "init", ".", "_argnames", "+=", "plugin", ".", "init", ".", "_argnames", "plugins", "[", "ep", ".", "name", "]", "=", "plugin" ]
43.857143
18.5
def _render_headers(self): """ Write the headers row """ headers = getattr(self, 'headers', ()) for index, col in enumerate(headers): # We write the headers cell = self.worksheet.cell(row=1, column=index + 1) cell.value = col['label'] index += 1 extra_headers = getattr(self, 'extra_headers', ()) for add_index, col in enumerate(extra_headers): cell = self.worksheet.cell(row=1, column=add_index + index + 1) cell.value = col['label']
[ "def", "_render_headers", "(", "self", ")", ":", "headers", "=", "getattr", "(", "self", ",", "'headers'", ",", "(", ")", ")", "for", "index", ",", "col", "in", "enumerate", "(", "headers", ")", ":", "# We write the headers", "cell", "=", "self", ".", "worksheet", ".", "cell", "(", "row", "=", "1", ",", "column", "=", "index", "+", "1", ")", "cell", ".", "value", "=", "col", "[", "'label'", "]", "index", "+=", "1", "extra_headers", "=", "getattr", "(", "self", ",", "'extra_headers'", ",", "(", ")", ")", "for", "add_index", ",", "col", "in", "enumerate", "(", "extra_headers", ")", ":", "cell", "=", "self", ".", "worksheet", ".", "cell", "(", "row", "=", "1", ",", "column", "=", "add_index", "+", "index", "+", "1", ")", "cell", ".", "value", "=", "col", "[", "'label'", "]" ]
34.0625
15.0625
def connect(config_file=qcs.default_filename, section='info', remember_me=False, remember_me_always=False): """ Return a QGAPIConnect object for v1 API pulling settings from config file. """ # Retrieve login credentials. conf = qcconf.QualysConnectConfig(filename=config_file, section=section, remember_me=remember_me, remember_me_always=remember_me_always) connect = qcconn.QGConnector(conf.get_auth(), conf.get_hostname(), conf.proxies, conf.max_retries) logger.info("Finished building connector.") return connect
[ "def", "connect", "(", "config_file", "=", "qcs", ".", "default_filename", ",", "section", "=", "'info'", ",", "remember_me", "=", "False", ",", "remember_me_always", "=", "False", ")", ":", "# Retrieve login credentials.", "conf", "=", "qcconf", ".", "QualysConnectConfig", "(", "filename", "=", "config_file", ",", "section", "=", "section", ",", "remember_me", "=", "remember_me", ",", "remember_me_always", "=", "remember_me_always", ")", "connect", "=", "qcconn", ".", "QGConnector", "(", "conf", ".", "get_auth", "(", ")", ",", "conf", ".", "get_hostname", "(", ")", ",", "conf", ".", "proxies", ",", "conf", ".", "max_retries", ")", "logger", ".", "info", "(", "\"Finished building connector.\"", ")", "return", "connect" ]
51.692308
20.692308
def balanceSheetDF(symbol, token='', version=''): '''Pulls balance sheet data. Available quarterly (4 quarters) and annually (4 years) https://iexcloud.io/docs/api/#balance-sheet Updates at 8am, 9am UTC daily Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: DataFrame: result ''' val = balanceSheet(symbol, token, version) df = pd.io.json.json_normalize(val, 'balancesheet', 'symbol') _toDatetime(df) _reindex(df, 'reportDate') return df
[ "def", "balanceSheetDF", "(", "symbol", ",", "token", "=", "''", ",", "version", "=", "''", ")", ":", "val", "=", "balanceSheet", "(", "symbol", ",", "token", ",", "version", ")", "df", "=", "pd", ".", "io", ".", "json", ".", "json_normalize", "(", "val", ",", "'balancesheet'", ",", "'symbol'", ")", "_toDatetime", "(", "df", ")", "_reindex", "(", "df", ",", "'reportDate'", ")", "return", "df" ]
27.9
21.8
def runs(self): """Instance depends on the API version: * 2018-09-01: :class:`RunsOperations<azure.mgmt.containerregistry.v2018_09_01.operations.RunsOperations>` """ api_version = self._get_api_version('runs') if api_version == '2018-09-01': from .v2018_09_01.operations import RunsOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
[ "def", "runs", "(", "self", ")", ":", "api_version", "=", "self", ".", "_get_api_version", "(", "'runs'", ")", "if", "api_version", "==", "'2018-09-01'", ":", "from", ".", "v2018_09_01", ".", "operations", "import", "RunsOperations", "as", "OperationClass", "else", ":", "raise", "NotImplementedError", "(", "\"APIVersion {} is not available\"", ".", "format", "(", "api_version", ")", ")", "return", "OperationClass", "(", "self", ".", "_client", ",", "self", ".", "config", ",", "Serializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ",", "Deserializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ")" ]
55.363636
34.272727
def get_sequence_rule_enablers_by_search(self, sequence_rule_enabler_query, sequence_rule_enabler_search): """Pass through to provider SequenceRuleEnablerSearchSession.get_sequence_rule_enablers_by_search""" # Implemented from azosid template for - # osid.resource.ResourceSearchSession.get_resources_by_search_template if not self._can('search'): raise PermissionDenied() return self._provider_session.get_sequence_rule_enablers_by_search(sequence_rule_enabler_query, sequence_rule_enabler_search)
[ "def", "get_sequence_rule_enablers_by_search", "(", "self", ",", "sequence_rule_enabler_query", ",", "sequence_rule_enabler_search", ")", ":", "# Implemented from azosid template for -", "# osid.resource.ResourceSearchSession.get_resources_by_search_template", "if", "not", "self", ".", "_can", "(", "'search'", ")", ":", "raise", "PermissionDenied", "(", ")", "return", "self", ".", "_provider_session", ".", "get_sequence_rule_enablers_by_search", "(", "sequence_rule_enabler_query", ",", "sequence_rule_enabler_search", ")" ]
77.714286
30.571429
def stop(self): """ Stop ZMQ tools. :return: self """ LOGGER.debug("zeromq.Driver.stop") for publisher in self.publishers_registry: publisher.stop() self.publishers_registry.clear() for subscriber in self.subscribers_registry: if subscriber.is_started: subscriber.stop() self.subscribers_registry.clear() # pykka.ActorRegistry.stop_all() return self
[ "def", "stop", "(", "self", ")", ":", "LOGGER", ".", "debug", "(", "\"zeromq.Driver.stop\"", ")", "for", "publisher", "in", "self", ".", "publishers_registry", ":", "publisher", ".", "stop", "(", ")", "self", ".", "publishers_registry", ".", "clear", "(", ")", "for", "subscriber", "in", "self", ".", "subscribers_registry", ":", "if", "subscriber", ".", "is_started", ":", "subscriber", ".", "stop", "(", ")", "self", ".", "subscribers_registry", ".", "clear", "(", ")", "# pykka.ActorRegistry.stop_all()", "return", "self" ]
30.866667
8.6
def graph_png(self): """ Export a graph of the data in png format using graphviz/dot. """ if not self.out_file: ui.error(c.MESSAGES["png_missing_out"]) sys.exit(1) cli_flags = "-Gsize='{0}' -Gdpi='{1}' {2} ".format(self.size, self.dpi, self.flags) cli_flags += "-o {0}".format(self.out_file) (out, err) = utils.capture_shell( "ansigenome export -t graph -f dot | dot -Tpng {0}" .format(cli_flags)) if err: ui.error(err)
[ "def", "graph_png", "(", "self", ")", ":", "if", "not", "self", ".", "out_file", ":", "ui", ".", "error", "(", "c", ".", "MESSAGES", "[", "\"png_missing_out\"", "]", ")", "sys", ".", "exit", "(", "1", ")", "cli_flags", "=", "\"-Gsize='{0}' -Gdpi='{1}' {2} \"", ".", "format", "(", "self", ".", "size", ",", "self", ".", "dpi", ",", "self", ".", "flags", ")", "cli_flags", "+=", "\"-o {0}\"", ".", "format", "(", "self", ".", "out_file", ")", "(", "out", ",", "err", ")", "=", "utils", ".", "capture_shell", "(", "\"ansigenome export -t graph -f dot | dot -Tpng {0}\"", ".", "format", "(", "cli_flags", ")", ")", "if", "err", ":", "ui", ".", "error", "(", "err", ")" ]
32.666667
20
def buscar_por_id(self, id_ambiente): """Obtém um ambiente a partir da chave primária (identificador). :param id_ambiente: Identificador do ambiente. :return: Dicionário com a seguinte estrutura: :: {'ambiente': {'id': < id_ambiente >, 'link': < link >, 'id_divisao': < id_divisao >, 'nome_divisao': < nome_divisao >, 'id_ambiente_logico': < id_ambiente_logico >, 'nome_ambiente_logico': < nome_ambiente_logico >, 'id_grupo_l3': < id_grupo_l3 >, 'nome_grupo_l3': < nome_grupo_l3 >, 'id_filter': < id_filter >, 'filter_name': < filter_name >, 'acl_path': < acl_path >, 'ipv4_template': < ipv4_template >, 'ipv6_template': < ipv6_template >, 'ambiente_rede': < ambiente_rede >}} :raise AmbienteNaoExisteError: Ambiente não cadastrado. :raise InvalidParameterError: Identificador do ambiente é nulo ou inválido. :raise DataBaseError: Falha na networkapi ao acessar o banco de dados. :raise XMLError: Falha na networkapi ao gerar o XML de resposta. """ if not is_valid_int_param(id_ambiente): raise InvalidParameterError( u'O identificador do ambiente é inválido ou não foi informado.') url = 'environment/id/' + str(id_ambiente) + '/' code, xml = self.submit(None, 'GET', url) return self.response(code, xml)
[ "def", "buscar_por_id", "(", "self", ",", "id_ambiente", ")", ":", "if", "not", "is_valid_int_param", "(", "id_ambiente", ")", ":", "raise", "InvalidParameterError", "(", "u'O identificador do ambiente é inválido ou não foi informado.')", "", "url", "=", "'environment/id/'", "+", "str", "(", "id_ambiente", ")", "+", "'/'", "code", ",", "xml", "=", "self", ".", "submit", "(", "None", ",", "'GET'", ",", "url", ")", "return", "self", ".", "response", "(", "code", ",", "xml", ")" ]
38.842105
18.184211
def security_rule_create_or_update(name, access, direction, priority, protocol, security_group, resource_group, source_address_prefix=None, destination_address_prefix=None, source_port_range=None, destination_port_range=None, source_address_prefixes=None, destination_address_prefixes=None, source_port_ranges=None, destination_port_ranges=None, **kwargs): ''' .. versionadded:: 2019.2.0 Create or update a security rule within a specified network security group. :param name: The name of the security rule to create. :param access: 'allow' or 'deny' :param direction: 'inbound' or 'outbound' :param priority: Integer between 100 and 4096 used for ordering rule application. :param protocol: 'tcp', 'udp', or '*' :param destination_address_prefix: The CIDR or destination IP range. Asterix '*' can also be used to match all destination IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from. :param destination_port_range: The destination port or range. Integer or range between 0 and 65535. Asterix '*' can also be used to match all ports. :param source_address_prefix: The CIDR or source IP range. Asterix '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from. :param source_port_range: The source port or range. Integer or range between 0 and 65535. Asterix '*' can also be used to match all ports. :param destination_address_prefixes: A list of destination_address_prefix values. This parameter overrides destination_address_prefix and will cause any value entered there to be ignored. :param destination_port_ranges: A list of destination_port_range values. This parameter overrides destination_port_range and will cause any value entered there to be ignored. 
:param source_address_prefixes: A list of source_address_prefix values. This parameter overrides source_address_prefix and will cause any value entered there to be ignored. :param source_port_ranges: A list of source_port_range values. This parameter overrides source_port_range and will cause any value entered there to be ignored. :param security_group: The network security group containing the security rule. :param resource_group: The resource group name assigned to the network security group. CLI Example: .. code-block:: bash salt-call azurearm_network.security_rule_create_or_update testrule1 allow outbound 101 tcp testnsg testgroup \ source_address_prefix='*' destination_address_prefix=internet source_port_range='*' \ destination_port_range='1-1024' ''' exclusive_params = [ ('source_port_ranges', 'source_port_range'), ('source_address_prefixes', 'source_address_prefix'), ('destination_port_ranges', 'destination_port_range'), ('destination_address_prefixes', 'destination_address_prefix'), ] for params in exclusive_params: # pylint: disable=eval-used if not eval(params[0]) and not eval(params[1]): log.error( 'Either the %s or %s parameter must be provided!', params[0], params[1] ) return False # pylint: disable=eval-used if eval(params[0]): # pylint: disable=exec-used exec('{0} = None'.format(params[1])) netconn = __utils__['azurearm.get_client']('network', **kwargs) try: rulemodel = __utils__['azurearm.create_object_model']( 'network', 'SecurityRule', name=name, access=access, direction=direction, priority=priority, protocol=protocol, source_port_ranges=source_port_ranges, source_port_range=source_port_range, source_address_prefixes=source_address_prefixes, source_address_prefix=source_address_prefix, destination_port_ranges=destination_port_ranges, destination_port_range=destination_port_range, destination_address_prefixes=destination_address_prefixes, destination_address_prefix=destination_address_prefix, **kwargs ) except TypeError as exc: result = {'error': 
'The object model could not be built. ({0})'.format(str(exc))} return result try: secrule = netconn.security_rules.create_or_update( resource_group_name=resource_group, network_security_group_name=security_group, security_rule_name=name, security_rule_parameters=rulemodel ) secrule.wait() secrule_result = secrule.result() result = secrule_result.as_dict() except CloudError as exc: __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs) result = {'error': str(exc)} except SerializationError as exc: result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))} return result
[ "def", "security_rule_create_or_update", "(", "name", ",", "access", ",", "direction", ",", "priority", ",", "protocol", ",", "security_group", ",", "resource_group", ",", "source_address_prefix", "=", "None", ",", "destination_address_prefix", "=", "None", ",", "source_port_range", "=", "None", ",", "destination_port_range", "=", "None", ",", "source_address_prefixes", "=", "None", ",", "destination_address_prefixes", "=", "None", ",", "source_port_ranges", "=", "None", ",", "destination_port_ranges", "=", "None", ",", "*", "*", "kwargs", ")", ":", "exclusive_params", "=", "[", "(", "'source_port_ranges'", ",", "'source_port_range'", ")", ",", "(", "'source_address_prefixes'", ",", "'source_address_prefix'", ")", ",", "(", "'destination_port_ranges'", ",", "'destination_port_range'", ")", ",", "(", "'destination_address_prefixes'", ",", "'destination_address_prefix'", ")", ",", "]", "for", "params", "in", "exclusive_params", ":", "# pylint: disable=eval-used", "if", "not", "eval", "(", "params", "[", "0", "]", ")", "and", "not", "eval", "(", "params", "[", "1", "]", ")", ":", "log", ".", "error", "(", "'Either the %s or %s parameter must be provided!'", ",", "params", "[", "0", "]", ",", "params", "[", "1", "]", ")", "return", "False", "# pylint: disable=eval-used", "if", "eval", "(", "params", "[", "0", "]", ")", ":", "# pylint: disable=exec-used", "exec", "(", "'{0} = None'", ".", "format", "(", "params", "[", "1", "]", ")", ")", "netconn", "=", "__utils__", "[", "'azurearm.get_client'", "]", "(", "'network'", ",", "*", "*", "kwargs", ")", "try", ":", "rulemodel", "=", "__utils__", "[", "'azurearm.create_object_model'", "]", "(", "'network'", ",", "'SecurityRule'", ",", "name", "=", "name", ",", "access", "=", "access", ",", "direction", "=", "direction", ",", "priority", "=", "priority", ",", "protocol", "=", "protocol", ",", "source_port_ranges", "=", "source_port_ranges", ",", "source_port_range", "=", "source_port_range", ",", "source_address_prefixes", 
"=", "source_address_prefixes", ",", "source_address_prefix", "=", "source_address_prefix", ",", "destination_port_ranges", "=", "destination_port_ranges", ",", "destination_port_range", "=", "destination_port_range", ",", "destination_address_prefixes", "=", "destination_address_prefixes", ",", "destination_address_prefix", "=", "destination_address_prefix", ",", "*", "*", "kwargs", ")", "except", "TypeError", "as", "exc", ":", "result", "=", "{", "'error'", ":", "'The object model could not be built. ({0})'", ".", "format", "(", "str", "(", "exc", ")", ")", "}", "return", "result", "try", ":", "secrule", "=", "netconn", ".", "security_rules", ".", "create_or_update", "(", "resource_group_name", "=", "resource_group", ",", "network_security_group_name", "=", "security_group", ",", "security_rule_name", "=", "name", ",", "security_rule_parameters", "=", "rulemodel", ")", "secrule", ".", "wait", "(", ")", "secrule_result", "=", "secrule", ".", "result", "(", ")", "result", "=", "secrule_result", ".", "as_dict", "(", ")", "except", "CloudError", "as", "exc", ":", "__utils__", "[", "'azurearm.log_cloud_error'", "]", "(", "'network'", ",", "str", "(", "exc", ")", ",", "*", "*", "kwargs", ")", "result", "=", "{", "'error'", ":", "str", "(", "exc", ")", "}", "except", "SerializationError", "as", "exc", ":", "result", "=", "{", "'error'", ":", "'The object model could not be parsed. ({0})'", ".", "format", "(", "str", "(", "exc", ")", ")", "}", "return", "result" ]
40.266667
26.666667
def filter_filenames_by_info(self, filename_items): """Filter out file using metadata from the filenames. This sorts out the different lon and lat datasets depending on TC is desired or not. """ filename_items = list(filename_items) geo_keep = [] geo_del = [] for filename, filename_info in filename_items: filename_info['datasets'] = datasets = filename_info['datasets'].split('-') if ('GITCO' in datasets) or ('GMTCO' in datasets): if self.use_tc is False: geo_del.append(filename) else: geo_keep.append(filename) elif ('GIMGO' in datasets) or ('GMODO' in datasets): if self.use_tc is True: geo_del.append(filename) else: geo_keep.append(filename) if geo_keep: fdict = dict(filename_items) for to_del in geo_del: for dataset in ['GITCO', 'GMTCO', 'GIMGO', 'GMODO']: try: fdict[to_del]['datasets'].remove(dataset) except ValueError: pass if not fdict[to_del]['datasets']: del fdict[to_del] filename_items = fdict.items() for filename, filename_info in filename_items: filename_info['datasets'] = '-'.join(filename_info['datasets']) return super(VIIRSSDRReader, self).filter_filenames_by_info(filename_items)
[ "def", "filter_filenames_by_info", "(", "self", ",", "filename_items", ")", ":", "filename_items", "=", "list", "(", "filename_items", ")", "geo_keep", "=", "[", "]", "geo_del", "=", "[", "]", "for", "filename", ",", "filename_info", "in", "filename_items", ":", "filename_info", "[", "'datasets'", "]", "=", "datasets", "=", "filename_info", "[", "'datasets'", "]", ".", "split", "(", "'-'", ")", "if", "(", "'GITCO'", "in", "datasets", ")", "or", "(", "'GMTCO'", "in", "datasets", ")", ":", "if", "self", ".", "use_tc", "is", "False", ":", "geo_del", ".", "append", "(", "filename", ")", "else", ":", "geo_keep", ".", "append", "(", "filename", ")", "elif", "(", "'GIMGO'", "in", "datasets", ")", "or", "(", "'GMODO'", "in", "datasets", ")", ":", "if", "self", ".", "use_tc", "is", "True", ":", "geo_del", ".", "append", "(", "filename", ")", "else", ":", "geo_keep", ".", "append", "(", "filename", ")", "if", "geo_keep", ":", "fdict", "=", "dict", "(", "filename_items", ")", "for", "to_del", "in", "geo_del", ":", "for", "dataset", "in", "[", "'GITCO'", ",", "'GMTCO'", ",", "'GIMGO'", ",", "'GMODO'", "]", ":", "try", ":", "fdict", "[", "to_del", "]", "[", "'datasets'", "]", ".", "remove", "(", "dataset", ")", "except", "ValueError", ":", "pass", "if", "not", "fdict", "[", "to_del", "]", "[", "'datasets'", "]", ":", "del", "fdict", "[", "to_del", "]", "filename_items", "=", "fdict", ".", "items", "(", ")", "for", "filename", ",", "filename_info", "in", "filename_items", ":", "filename_info", "[", "'datasets'", "]", "=", "'-'", ".", "join", "(", "filename_info", "[", "'datasets'", "]", ")", "return", "super", "(", "VIIRSSDRReader", ",", "self", ")", ".", "filter_filenames_by_info", "(", "filename_items", ")" ]
43.742857
15.057143
def interpret_header(self): """ Read pertinent information from the image headers, especially location and radius of the Sun to calculate the default thematic map :return: setes self.date, self.cy, self.cx, and self.sun_radius_pixel """ # handle special cases since date-obs field changed names if 'DATE_OBS' in self.header: self.date = self.header['DATE_OBS'] elif 'DATE-OBS' in self.header: self.date = self.header['DATE-OBS'] else: raise Exception("Image does not have a DATE_OBS or DATE-OBS field") self.cy, self.cx = self.header['CRPIX1'], self.header['CRPIX2'] sun_radius_angular = sun.solar_semidiameter_angular_size(t=time.parse_time(self.date)).arcsec arcsec_per_pixel = self.header['CDELT1'] self.sun_radius_pixel = (sun_radius_angular / arcsec_per_pixel)
[ "def", "interpret_header", "(", "self", ")", ":", "# handle special cases since date-obs field changed names", "if", "'DATE_OBS'", "in", "self", ".", "header", ":", "self", ".", "date", "=", "self", ".", "header", "[", "'DATE_OBS'", "]", "elif", "'DATE-OBS'", "in", "self", ".", "header", ":", "self", ".", "date", "=", "self", ".", "header", "[", "'DATE-OBS'", "]", "else", ":", "raise", "Exception", "(", "\"Image does not have a DATE_OBS or DATE-OBS field\"", ")", "self", ".", "cy", ",", "self", ".", "cx", "=", "self", ".", "header", "[", "'CRPIX1'", "]", ",", "self", ".", "header", "[", "'CRPIX2'", "]", "sun_radius_angular", "=", "sun", ".", "solar_semidiameter_angular_size", "(", "t", "=", "time", ".", "parse_time", "(", "self", ".", "date", ")", ")", ".", "arcsec", "arcsec_per_pixel", "=", "self", ".", "header", "[", "'CDELT1'", "]", "self", ".", "sun_radius_pixel", "=", "(", "sun_radius_angular", "/", "arcsec_per_pixel", ")" ]
49.444444
22
def get_revision_sha(self, dest, rev): """ Return (sha_or_none, is_branch), where sha_or_none is a commit hash if the revision names a remote branch or tag, otherwise None. Args: dest: the repository directory. rev: the revision name. """ # Pass rev to pre-filter the list. output = self.run_command(['show-ref', rev], cwd=dest, show_stdout=False, on_returncode='ignore') refs = {} for line in output.strip().splitlines(): try: sha, ref = line.split() except ValueError: # Include the offending line to simplify troubleshooting if # this error ever occurs. raise ValueError('unexpected show-ref line: {!r}'.format(line)) refs[ref] = sha branch_ref = 'refs/remotes/origin/{}'.format(rev) tag_ref = 'refs/tags/{}'.format(rev) sha = refs.get(branch_ref) if sha is not None: return (sha, True) sha = refs.get(tag_ref) return (sha, False)
[ "def", "get_revision_sha", "(", "self", ",", "dest", ",", "rev", ")", ":", "# Pass rev to pre-filter the list.", "output", "=", "self", ".", "run_command", "(", "[", "'show-ref'", ",", "rev", "]", ",", "cwd", "=", "dest", ",", "show_stdout", "=", "False", ",", "on_returncode", "=", "'ignore'", ")", "refs", "=", "{", "}", "for", "line", "in", "output", ".", "strip", "(", ")", ".", "splitlines", "(", ")", ":", "try", ":", "sha", ",", "ref", "=", "line", ".", "split", "(", ")", "except", "ValueError", ":", "# Include the offending line to simplify troubleshooting if", "# this error ever occurs.", "raise", "ValueError", "(", "'unexpected show-ref line: {!r}'", ".", "format", "(", "line", ")", ")", "refs", "[", "ref", "]", "=", "sha", "branch_ref", "=", "'refs/remotes/origin/{}'", ".", "format", "(", "rev", ")", "tag_ref", "=", "'refs/tags/{}'", ".", "format", "(", "rev", ")", "sha", "=", "refs", ".", "get", "(", "branch_ref", ")", "if", "sha", "is", "not", "None", ":", "return", "(", "sha", ",", "True", ")", "sha", "=", "refs", ".", "get", "(", "tag_ref", ")", "return", "(", "sha", ",", "False", ")" ]
33.121212
19
def fromProfile(cls, profile): """Return an `Origin` from a given configuration profile. :see: `ProfileStore`. """ session = bones.SessionAPI.fromProfile(profile) return cls(session)
[ "def", "fromProfile", "(", "cls", ",", "profile", ")", ":", "session", "=", "bones", ".", "SessionAPI", ".", "fromProfile", "(", "profile", ")", "return", "cls", "(", "session", ")" ]
31
12.714286
def update_stock_codes(): """获取所有股票 ID 到 all_stock_code 目录下""" all_stock_codes_url = "http://www.shdjt.com/js/lib/astock.js" grep_stock_codes = re.compile(r"~(\d+)`") response = requests.get(all_stock_codes_url) all_stock_codes = grep_stock_codes.findall(response.text) with open(stock_code_path(), "w") as f: f.write(json.dumps(dict(stock=all_stock_codes)))
[ "def", "update_stock_codes", "(", ")", ":", "all_stock_codes_url", "=", "\"http://www.shdjt.com/js/lib/astock.js\"", "grep_stock_codes", "=", "re", ".", "compile", "(", "r\"~(\\d+)`\"", ")", "response", "=", "requests", ".", "get", "(", "all_stock_codes_url", ")", "all_stock_codes", "=", "grep_stock_codes", ".", "findall", "(", "response", ".", "text", ")", "with", "open", "(", "stock_code_path", "(", ")", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "json", ".", "dumps", "(", "dict", "(", "stock", "=", "all_stock_codes", ")", ")", ")" ]
47.875
11.625
def update(self, *args, **kwargs): """ Reimplements the :meth:`Dict.update` method. :param \*args: Arguments. :type \*args: \* :param \*\*kwargs: Keywords arguments. :type \*\*kwargs: \*\* """ dict.update(self, *args, **kwargs) self.__dict__.update(*args, **kwargs)
[ "def", "update", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "dict", ".", "update", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "__dict__", ".", "update", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
27.333333
12
def _forward_mode(self, *args): """Forward mode differentiation for a constant""" # Evaluate inner function self.f X: np.ndarray dX: np.ndarray X, dX = self.f._forward_mode(*args) # The function value val = self.func(X) # The derivative diff = self.deriv(X) * dX return (val, diff)
[ "def", "_forward_mode", "(", "self", ",", "*", "args", ")", ":", "# Evaluate inner function self.f", "X", ":", "np", ".", "ndarray", "dX", ":", "np", ".", "ndarray", "X", ",", "dX", "=", "self", ".", "f", ".", "_forward_mode", "(", "*", "args", ")", "# The function value", "val", "=", "self", ".", "func", "(", "X", ")", "# The derivative", "diff", "=", "self", ".", "deriv", "(", "X", ")", "*", "dX", "return", "(", "val", ",", "diff", ")" ]
31.909091
10.181818
def formatted_str(self, format): """Return formatted str. :param format: one of 'json', 'csv' are supported """ assert(format in ('json', 'csv')) ret_str_list = [] for rec in self._records: if format == 'json': ret_str_list.append('{') for i in xrange(len(rec)): colname, colval = self._rdef[i].name, rec[i] ret_str_list.append('"%s":"%s"' % (colname, str(colval).replace('"', r'\"'))) ret_str_list.append(',') ret_str_list.pop() # drop last comma ret_str_list.append('}%s' % (os.linesep)) elif format == 'csv': for i in xrange(len(rec)): colval = rec[i] ret_str_list.append('"%s"' % (str(colval).replace('"', r'\"'))) ret_str_list.append(',') ret_str_list.pop() # drop last comma ret_str_list.append('%s' % (os.linesep)) else: assert(False) return ''.join(ret_str_list)
[ "def", "formatted_str", "(", "self", ",", "format", ")", ":", "assert", "(", "format", "in", "(", "'json'", ",", "'csv'", ")", ")", "ret_str_list", "=", "[", "]", "for", "rec", "in", "self", ".", "_records", ":", "if", "format", "==", "'json'", ":", "ret_str_list", ".", "append", "(", "'{'", ")", "for", "i", "in", "xrange", "(", "len", "(", "rec", ")", ")", ":", "colname", ",", "colval", "=", "self", ".", "_rdef", "[", "i", "]", ".", "name", ",", "rec", "[", "i", "]", "ret_str_list", ".", "append", "(", "'\"%s\":\"%s\"'", "%", "(", "colname", ",", "str", "(", "colval", ")", ".", "replace", "(", "'\"'", ",", "r'\\\"'", ")", ")", ")", "ret_str_list", ".", "append", "(", "','", ")", "ret_str_list", ".", "pop", "(", ")", "# drop last comma", "ret_str_list", ".", "append", "(", "'}%s'", "%", "(", "os", ".", "linesep", ")", ")", "elif", "format", "==", "'csv'", ":", "for", "i", "in", "xrange", "(", "len", "(", "rec", ")", ")", ":", "colval", "=", "rec", "[", "i", "]", "ret_str_list", ".", "append", "(", "'\"%s\"'", "%", "(", "str", "(", "colval", ")", ".", "replace", "(", "'\"'", ",", "r'\\\"'", ")", ")", ")", "ret_str_list", ".", "append", "(", "','", ")", "ret_str_list", ".", "pop", "(", ")", "# drop last comma", "ret_str_list", ".", "append", "(", "'%s'", "%", "(", "os", ".", "linesep", ")", ")", "else", ":", "assert", "(", "False", ")", "return", "''", ".", "join", "(", "ret_str_list", ")" ]
41.846154
13.115385
def get_instance(self, payload): """ Build an instance of RecordingInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.call.recording.RecordingInstance :rtype: twilio.rest.api.v2010.account.call.recording.RecordingInstance """ return RecordingInstance( self._version, payload, account_sid=self._solution['account_sid'], call_sid=self._solution['call_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "RecordingInstance", "(", "self", ".", "_version", ",", "payload", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", "call_sid", "=", "self", ".", "_solution", "[", "'call_sid'", "]", ",", ")" ]
33.733333
18.933333
def _get_comments(group_tasks): """ Get the human readable comments and quantities for the task types. """ comments = {} for status, human in _COMMENTS: num_tasks = _get_number_of_tasks_for(status, group_tasks) if num_tasks: space = " " if status in _PENDING_SUB_STATUSES else "" comments[status] = '{space}* {num_tasks} {human}:\n'.format( space=space, num_tasks=num_tasks, human=human) return comments
[ "def", "_get_comments", "(", "group_tasks", ")", ":", "comments", "=", "{", "}", "for", "status", ",", "human", "in", "_COMMENTS", ":", "num_tasks", "=", "_get_number_of_tasks_for", "(", "status", ",", "group_tasks", ")", "if", "num_tasks", ":", "space", "=", "\" \"", "if", "status", "in", "_PENDING_SUB_STATUSES", "else", "\"\"", "comments", "[", "status", "]", "=", "'{space}* {num_tasks} {human}:\\n'", ".", "format", "(", "space", "=", "space", ",", "num_tasks", "=", "num_tasks", ",", "human", "=", "human", ")", "return", "comments" ]
36.071429
15.785714
def push(self, url, title=''): """ Pushes the url into the history stack at the current index. :param url | <str> :return <bool> | changed """ # ignore refreshes of the top level if self.currentUrl() == url or self._blockStack: return False self._blockStack = True self._stack = self._stack[:self._index+1] self._stack.append((nativestring(url), nativestring(title))) over = len(self._stack) - self.maximum() if over > 0: self._stack = self._stack[over:] self._index = len(self._stack) - 1 self.canGoBackChanged.emit(self.canGoBack()) self.canGoForwardChanged.emit(self.canGoForward()) self._blockStack = False return True
[ "def", "push", "(", "self", ",", "url", ",", "title", "=", "''", ")", ":", "# ignore refreshes of the top level", "if", "self", ".", "currentUrl", "(", ")", "==", "url", "or", "self", ".", "_blockStack", ":", "return", "False", "self", ".", "_blockStack", "=", "True", "self", ".", "_stack", "=", "self", ".", "_stack", "[", ":", "self", ".", "_index", "+", "1", "]", "self", ".", "_stack", ".", "append", "(", "(", "nativestring", "(", "url", ")", ",", "nativestring", "(", "title", ")", ")", ")", "over", "=", "len", "(", "self", ".", "_stack", ")", "-", "self", ".", "maximum", "(", ")", "if", "over", ">", "0", ":", "self", ".", "_stack", "=", "self", ".", "_stack", "[", "over", ":", "]", "self", ".", "_index", "=", "len", "(", "self", ".", "_stack", ")", "-", "1", "self", ".", "canGoBackChanged", ".", "emit", "(", "self", ".", "canGoBack", "(", ")", ")", "self", ".", "canGoForwardChanged", ".", "emit", "(", "self", ".", "canGoForward", "(", ")", ")", "self", ".", "_blockStack", "=", "False", "return", "True" ]
30.357143
17.142857
def getattr(self, obj, attribute): """Get an item or attribute of an object but prefer the attribute. Unlike :meth:`getitem` the attribute *must* be a bytestring. """ try: return getattr(obj, attribute) except AttributeError: pass try: return obj[attribute] except (TypeError, LookupError, AttributeError): return self.undefined(obj=obj, name=attribute)
[ "def", "getattr", "(", "self", ",", "obj", ",", "attribute", ")", ":", "try", ":", "return", "getattr", "(", "obj", ",", "attribute", ")", "except", "AttributeError", ":", "pass", "try", ":", "return", "obj", "[", "attribute", "]", "except", "(", "TypeError", ",", "LookupError", ",", "AttributeError", ")", ":", "return", "self", ".", "undefined", "(", "obj", "=", "obj", ",", "name", "=", "attribute", ")" ]
37.166667
13.916667
def normalize(pw): """ Lower case, and change the symbols to closest characters""" pw_lower = pw.lower() return ''.join(helper.L33T.get(c, c) for c in pw_lower)
[ "def", "normalize", "(", "pw", ")", ":", "pw_lower", "=", "pw", ".", "lower", "(", ")", "return", "''", ".", "join", "(", "helper", ".", "L33T", ".", "get", "(", "c", ",", "c", ")", "for", "c", "in", "pw_lower", ")" ]
42.25
14
def parse_site(sample, convention, Z): """ parse the site name from the sample name using the specified convention """ convention = str(convention) site = sample # default is that site = sample # # # Sample is final letter on site designation eg: TG001a (used by SIO lab # in San Diego) if convention == "1": return sample[:-1] # peel off terminal character # # Site-Sample format eg: BG94-1 (used by PGL lab in Beijing) # if convention == "2": parts = sample.strip('-').split('-') return parts[0] # # Sample is XXXX.YY where XXX is site and YY is sample # if convention == "3": parts = sample.split('.') return parts[0] # # Sample is XXXXYYY where XXX is site desgnation and YYY is Z long integer # if convention == "4": k = int(Z) - 1 return sample[0:-k] # peel off Z characters from site if convention == "5": # sample == site return sample if convention == "6": # should be names in orient.txt print("-W- Finding names in orient.txt is not currently supported") if convention == "7": # peel off Z characters for site k = int(Z) return sample[0:k] if convention == "8": # peel off Z characters for site return "" if convention == "9": # peel off Z characters for site return sample print("Error in site parsing routine") return
[ "def", "parse_site", "(", "sample", ",", "convention", ",", "Z", ")", ":", "convention", "=", "str", "(", "convention", ")", "site", "=", "sample", "# default is that site = sample", "#", "#", "# Sample is final letter on site designation eg: TG001a (used by SIO lab", "# in San Diego)", "if", "convention", "==", "\"1\"", ":", "return", "sample", "[", ":", "-", "1", "]", "# peel off terminal character", "#", "# Site-Sample format eg: BG94-1 (used by PGL lab in Beijing)", "#", "if", "convention", "==", "\"2\"", ":", "parts", "=", "sample", ".", "strip", "(", "'-'", ")", ".", "split", "(", "'-'", ")", "return", "parts", "[", "0", "]", "#", "# Sample is XXXX.YY where XXX is site and YY is sample", "#", "if", "convention", "==", "\"3\"", ":", "parts", "=", "sample", ".", "split", "(", "'.'", ")", "return", "parts", "[", "0", "]", "#", "# Sample is XXXXYYY where XXX is site desgnation and YYY is Z long integer", "#", "if", "convention", "==", "\"4\"", ":", "k", "=", "int", "(", "Z", ")", "-", "1", "return", "sample", "[", "0", ":", "-", "k", "]", "# peel off Z characters from site", "if", "convention", "==", "\"5\"", ":", "# sample == site", "return", "sample", "if", "convention", "==", "\"6\"", ":", "# should be names in orient.txt", "print", "(", "\"-W- Finding names in orient.txt is not currently supported\"", ")", "if", "convention", "==", "\"7\"", ":", "# peel off Z characters for site", "k", "=", "int", "(", "Z", ")", "return", "sample", "[", "0", ":", "k", "]", "if", "convention", "==", "\"8\"", ":", "# peel off Z characters for site", "return", "\"\"", "if", "convention", "==", "\"9\"", ":", "# peel off Z characters for site", "return", "sample", "print", "(", "\"Error in site parsing routine\"", ")", "return" ]
27.897959
23.244898
def _has_expired(self): """ Has this HIT expired yet? """ expired = False if hasattr(self, 'Expiration'): now = datetime.datetime.utcnow() expiration = datetime.datetime.strptime(self.Expiration, '%Y-%m-%dT%H:%M:%SZ') expired = (now >= expiration) else: raise ValueError("ERROR: Request for expired property, but no Expiration in HIT!") return expired
[ "def", "_has_expired", "(", "self", ")", ":", "expired", "=", "False", "if", "hasattr", "(", "self", ",", "'Expiration'", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "expiration", "=", "datetime", ".", "datetime", ".", "strptime", "(", "self", ".", "Expiration", ",", "'%Y-%m-%dT%H:%M:%SZ'", ")", "expired", "=", "(", "now", ">=", "expiration", ")", "else", ":", "raise", "ValueError", "(", "\"ERROR: Request for expired property, but no Expiration in HIT!\"", ")", "return", "expired" ]
43
18.9