code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def average(old_avg, current_value, count):
    """Return the running mean after folding in ``current_value``.

    ``count`` is how many values went into ``old_avg`` (start with 0).
    A ``None`` average means "no data yet", so the new value is returned
    unchanged.
    """
    if old_avg is None:
        return current_value
    running_total = float(old_avg) * count + current_value
    return running_total / (count + 1)
Calculate the average. Count must start with 0 >>> average(None, 3.23, 0) 3.23 >>> average(0, 1, 0) 1.0 >>> average(2.5, 5, 4) 3.0
def createDocument(self, nsuri, qname, doctype=None):
    """Create a new writable DOM document object via minidom."""
    implementation = xml.dom.minidom.getDOMImplementation()
    return implementation.createDocument(nsuri, qname, doctype)
Create a new writable DOM document object.
def name(self) -> str:
    """Friendly name for the stop place or platform."""
    base = self._data['name']
    if not self.is_platform:
        return base
    code = self._data["publicCode"]
    if code:
        return base + " Platform " + code
    # No public code: fall back to the last part of the place id.
    return base + " Platform " + self.place_id.split(':')[-1]
Friendly name for the stop place or platform
def add_summary_stats_to_table(table_in, table_out, colnames):
    """Collect summary statistics for ``colnames`` of ``table_in`` and
    append them as columns of ``table_out``.
    """
    for col in colnames:
        col_in = table_in[col]
        stats = collect_summary_stats(col_in.data)
        for k, v in stats.items():
            # One output column per statistic, named "<col>_<stat>".
            out_name = "%s_%s" % (col, k)
            col_out = Column(data=np.vstack([v]), name=out_name,
                             dtype=col_in.dtype, shape=v.shape,
                             unit=col_in.unit)
            table_out.add_column(col_out)
Collect summary statistics from an input table and add them to an output table Parameters ---------- table_in : `astropy.table.Table` Table with the input data. table_out : `astropy.table.Table` Table with the output data. colnames : list List of the column names to get summary statistics for.
def _fast_dataset(
    variables: 'OrderedDict[Any, Variable]',
    coord_variables: Mapping[Any, Variable],
) -> 'Dataset':
    """Create a dataset as quickly as possible.

    Beware: the `variables` OrderedDict is modified INPLACE (the coord
    variables are merged into it).
    """
    from .dataset import Dataset
    variables.update(coord_variables)
    coord_names = set(coord_variables)
    return Dataset._from_vars_and_coord_names(variables, coord_names)
Create a dataset as quickly as possible. Beware: the `variables` OrderedDict is modified INPLACE.
def _hydrate_pivot_relation(self, models): for model in models: pivot = self.new_existing_pivot(self._clean_pivot_attributes(model)) model.set_relation("pivot", pivot)
Hydrate the pivot table relationship on the models. :type models: list
def parse_mysql_cnf(dbinfo):
    """Emulate the MySQL C library's ``read_default_file`` handling.

    Reads the ``[client]`` section of the file named by
    ``dbinfo['OPTIONS']['read_default_file']``; settings that are missing
    come back as ''.

    Returns ``(user, password, database_name, database_host, database_port)``.
    """
    import os

    defaults_file = dbinfo.get('OPTIONS', {}).get('read_default_file')
    if not defaults_file:
        return '', '', '', '', ''

    empty_defaults = {key: '' for key in
                      ('user', 'password', 'database', 'host', 'port', 'socket')}
    config = configparser.RawConfigParser(empty_defaults)
    config.read(os.path.expanduser(defaults_file))
    try:
        values = [config.get('client', key)
                  for key in ('user', 'password', 'database', 'host', 'port')]
        socket = config.get('client', 'socket')
        # Mirror the C client: "localhost" plus a socket means unix socket.
        if values[3] == 'localhost' and socket:
            values[3] = socket
        return tuple(values)
    except configparser.NoSectionError:
        return '', '', '', '', ''
Attempt to parse mysql database config file for connection settings. Ideally we would hook into django's code to do this, but read_default_file is handled by the mysql C libs so we have to emulate the behaviour Settings that are missing will return '' returns (user, password, database_name, database_host, database_port)
def gen_csv(sc, filename):
    """Write the 'listsoftware' query results to ``filename`` as CSV.

    :param sc: SecurityCenter connection object used to run the query.
    :param filename: path of the CSV file to create.
    """
    # BUG FIX: csv.writer requires a text-mode file in Python 3; the
    # original opened the file in 'wb', which makes every writerow()
    # call fail with a TypeError.  newline='' is the documented mode
    # for csv output.  A context manager also guarantees the close.
    with open(filename, 'w', newline='') as datafile:
        csvfile = csv.writer(datafile)
        csvfile.writerow(['Software Package Name', 'Count'])
        debug.write('Generating %s: ' % filename)
        fparams = {'fobj': csvfile}
        sc.query('listsoftware', func=writer, func_params=fparams)
        debug.write('\n')
csv SecurityCenterObj, EmailAddress
def _convert_addrinfo(cls, results: List[tuple]) -> Iterable[AddressInfo]:
    """Yield an AddressInfo for every raw addrinfo tuple in ``results``."""
    for entry in results:
        family, sockaddr = entry[0], entry[4]
        # Only IPv6 sockaddrs carry flow info / scope id (items 2 and 3).
        if family == socket.AF_INET6:
            flow_info, control_id = sockaddr[2], sockaddr[3]
        else:
            flow_info = control_id = None
        yield AddressInfo(sockaddr[0], family, flow_info, control_id)
Convert the result list to address info.
def devno_alloc(self):
    """Allocate a device number from the pool, formatted as four
    upper-case hexadecimal digits.

    Raises whatever the underlying pool raises when exhausted.
    """
    return "{:04X}".format(self._devno_pool.alloc())
Allocates a device number unique to this partition, in the range of 0x8000 to 0xFFFF. Returns: string: The device number as four hexadecimal digits in upper case. Raises: ValueError: No more device numbers available in that range.
def match_filter(self, idx_list, pattern, dict_type=False, dict_key='name'):
    """Return items of ``idx_list`` whose text matches ``pattern``.

    With ``dict_type=True`` the regex is applied to ``item[dict_key]``.
    Any other non-boolean value yields an empty result.  The result is
    wrapped by ``self._return_deque``.
    """
    if dict_type is True:
        return self._return_deque(
            [item for item in idx_list if re.search(pattern, item.get(dict_key))])
    if dict_type is False:
        return self._return_deque(
            [item for item in idx_list if re.search(pattern, item)])
    return self._return_deque()
Return Matched items in indexed files. :param idx_list: :return list
def has_duplicate_max(x):
    """Check whether the maximum of ``x`` is observed more than once.

    :param x: the time series to calculate the feature of
    :return: True when the max value occurs at least twice
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    return (x == x.max()).sum() >= 2
Checks if the maximum value of x is observed more than once :param x: the time series to calculate the feature of :type x: numpy.ndarray :return: the value of this feature :return type: bool
def StartCli(args, adb_commands, extra=None, **device_kwargs):
    """Starts a common CLI interface for this usb path and protocol.

    Returns 1 on any failure (device missing, USB error, or a method
    exception); otherwise returns whatever the invoked method returns.
    """
    try:
        dev = adb_commands()
        dev.ConnectDevice(port_path=args.port_path, serial=args.serial,
                          default_timeout_ms=args.timeout_ms, **device_kwargs)
    except usb_exceptions.DeviceNotFoundError as e:
        print('No device found: {}'.format(e), file=sys.stderr)
        return 1
    except usb_exceptions.CommonUsbError as e:
        print('Could not connect to device: {}'.format(e), file=sys.stderr)
        return 1
    try:
        return _RunMethod(dev, args, extra or {})
    except Exception as e:
        # Method errors go to stdout (not stderr), matching original behavior.
        sys.stdout.write(str(e))
        return 1
    finally:
        # Always release the device, even on success.
        dev.Close()
Starts a common CLI interface for this usb path and protocol.
def validate_obj(keys, obj):
    """Super simple "object" validation.

    ``keys`` may contain plain key names (required, and required to be
    truthy unless the value is a list) or lists of alternative keys of
    which at least one must be present in ``obj``.

    Returns a comma-separated description of what is missing, ending in
    " missing", or "" when everything validates.
    """
    missing = []
    for k in keys:
        if isinstance(k, str):
            # A plain key must exist; non-list values must also be truthy.
            if k not in obj or (not isinstance(obj[k], list) and not obj[k]):
                missing.append(k)
        elif isinstance(k, list):
            # A list means "at least one of these keys".
            if not any(alternative in obj for alternative in k):
                # BUG FIX: the original message had an unbalanced "(".
                missing.append("(%s)" % ','.join(k))
    if missing:
        return "%s missing" % ",".join(missing)
    return ""
Super simple "object" validation.
def guess(cls, sample):
    """Given a single sample, guess the column type for the field.

    If the sample is already an instance of an SQLAlchemy type, it is
    returned unchanged; otherwise the matching column type is picked,
    falling back to text.
    """
    if isinstance(sample, TypeEngine):
        return sample
    # Order matters: bool is an int subclass, datetime a date subclass.
    if isinstance(sample, bool):
        return cls.boolean
    if isinstance(sample, int):
        return cls.integer
    if isinstance(sample, float):
        return cls.float
    if isinstance(sample, datetime):
        return cls.datetime
    if isinstance(sample, date):
        return cls.date
    return cls.text
Given a single sample, guess the column type for the field. If the sample is an instance of an SQLAlchemy type, the type will be used instead.
def _get_file_paths(cur): out = [] if isinstance(cur, (list, tuple)): for x in cur: new = _get_file_paths(x) if new: out.extend(new) elif isinstance(cur, dict): if "class" in cur: out.append(cur["path"]) else: for k, v in cur.items(): new = _get_file_paths(v) if new: out.extend(new) return out
Retrieve a list of file paths, recursively traversing the
def hdel(self, key, *fields):
    """Remove ``fields`` from the hash stored at ``key``.

    Resolves to the number of fields actually removed.  With no fields,
    short-circuits with a future already resolved to 0 instead of
    issuing a redis command.
    """
    if not fields:
        future = concurrent.TracebackFuture()
        future.set_result(0)
    else:
        future = self._execute([b'HDEL', key] + list(fields))
    return future
Remove the specified fields from the hash stored at `key`. Specified fields that do not exist within this hash are ignored. If `key` does not exist, it is treated as an empty hash and this command returns zero. :param key: The key of the hash :type key: :class:`str`, :class:`bytes` :param fields: iterable of field names to remove :returns: the number of fields that were removed from the hash, not counting non-existing fields. :rtype: int
def get_too_few_non_zero_degree_day_warning(
    model_type, balance_point, degree_day_type, degree_days, minimum_non_zero
):
    """Warn when too few non-zero degree day values are available.

    Returns an empty list when ``degree_days`` has at least
    ``minimum_non_zero`` strictly positive values; otherwise a list with a
    single EEMeterWarning describing the shortfall.
    """
    warnings = []
    # Count strictly positive degree day values.
    n_non_zero = int((degree_days > 0).sum())
    if n_non_zero < minimum_non_zero:
        warnings.append(
            EEMeterWarning(
                qualified_name=(
                    "eemeter.caltrack_daily.{model_type}.too_few_non_zero_{degree_day_type}".format(
                        model_type=model_type, degree_day_type=degree_day_type
                    )
                ),
                description=(
                    "Number of non-zero daily {degree_day_type} values below accepted minimum."
                    " Candidate fit not attempted.".format(
                        degree_day_type=degree_day_type.upper()
                    )
                ),
                data={
                    "n_non_zero_{degree_day_type}".format(
                        degree_day_type=degree_day_type
                    ): n_non_zero,
                    "minimum_non_zero_{degree_day_type}".format(
                        degree_day_type=degree_day_type
                    ): minimum_non_zero,
                    "{degree_day_type}_balance_point".format(
                        degree_day_type=degree_day_type
                    ): balance_point,
                },
            )
        )
    return warnings
Return an empty list or a single warning wrapped in a list regarding non-zero degree days for a set of degree days. Parameters ---------- model_type : :any:`str` Model type (e.g., ``'cdd_hdd'``). balance_point : :any:`float` The balance point in question. degree_day_type : :any:`str` The type of degree days (``'cdd'`` or ``'hdd'``). degree_days : :any:`pandas.Series` A series of degree day values. minimum_non_zero : :any:`int` Minimum allowable number of non-zero degree day values. Returns ------- warnings : :any:`list` of :any:`eemeter.EEMeterWarning` Empty list or list of single warning.
def docid(url, encoding='ascii'):
    """Get DocID from a URL or an already-formatted docid string.

    url: str or bytes; strings are encoded with ``encoding`` first (the
    DocID depends on the exact bytes).  Dispatches to whichever parser
    matches the input shape.
    """
    if not isinstance(url, bytes):
        url = url.encode(encoding)
    parser = _URL_PARSER
    idx = 0
    # Scan the first few bytes: a non-hex byte (other than a correctly
    # placed '-') means this is a URL, not a pre-formatted docid.
    for _c in url:
        if _c not in _HEX:
            if not (_c == _SYM_MINUS and (idx == _DOMAINID_LENGTH
                                          or idx == _HOSTID_LENGTH + 1)):
                return parser.parse(url, idx)
        idx += 1
        if idx > 4:
            break
    # Choose a parser based on total length: raw docid, readable
    # (dash-separated) docid, or the generic fallback parser.
    _l = len(url)
    if _l == _DOCID_LENGTH:
        parser = _DOCID_PARSER
    elif _l == _READABLE_DOCID_LENGTH \
            and url[_DOMAINID_LENGTH] == _SYM_MINUS \
            and url[_HOSTID_LENGTH + 1] == _SYM_MINUS:
        parser = _R_DOCID_PARSER
    else:
        parser = _PARSER
    return parser.parse(url, idx)
Get DocID from URL. DocID generation depends on bytes of the URL string. So, if non-ascii charactors in the URL, encoding should be considered properly. Args: url (str or bytes): Pre-encoded bytes or string will be encoded with the 'encoding' argument. encoding (str, optional): Defaults to 'ascii'. Used to encode url argument if it is not pre-encoded into bytes. Returns: DocID: The DocID object. Examples: >>> from os_docid import docid >>> docid('http://www.google.com/') 1d5920f4b44b27a8-ed646a3334ca891f-ff90821feeb2b02a33a6f9fc8e5f3fcd >>> docid('1d5920f4b44b27a8-ed646a3334ca891f-ff90821feeb2b02a33a6f9fc8e5f3fcd') 1d5920f4b44b27a8-ed646a3334ca891f-ff90821feeb2b02a33a6f9fc8e5f3fcd >>> docid('1d5920f4b44b27a8ed646a3334ca891fff90821feeb2b02a33a6f9fc8e5f3fcd') 1d5920f4b44b27a8-ed646a3334ca891f-ff90821feeb2b02a33a6f9fc8e5f3fcd >>> docid('abc') NotImplementedError: Not supported data format
def Element(self, elem, **params):
    """Transform a deep copy of ``elem`` and return the first result.

    Copying first guarantees the input element is never mutated by the
    transformation.  Returns None when the transformation yields nothing.
    """
    transformed = self.__call__(deepcopy(elem), **params)
    return transformed[0] if len(transformed) > 0 else None
Ensure that the input element is immutable by the transformation. Returns a single element.
def _compute_a22_factor(self, imt): if imt.name == 'PGV': return 0.0 period = imt.period if period < 2.0: return 0.0 else: return 0.0625 * (period - 2.0)
Compute and return the a22 factor, equation 20, page 80.
async def executescript(self, sql_script: str) -> Cursor:
    """Helper to create a cursor and execute a user SQL script."""
    cursor = await self._execute(self._conn.executescript, sql_script)
    return Cursor(self, cursor)
Helper to create a cursor and execute a user script.
def listDatasetParents(self, dataset=''):
    """API to list a dataset's parents in DBS.

    :param dataset: dataset name (required)
    :returns: list of dicts with keys this_dataset, parent_dataset_id,
        parent_dataset
    """
    try:
        return self.dbsDataset.listDatasetParents(dataset)
    except dbsException as de:
        # Known DBS errors carry their own code/message.
        dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
    except Exception as ex:
        sError = "DBSReaderModel/listDatasetParents. %s\n. Exception trace: \n %s" \
            % (ex, traceback.format_exc())
        dbsExceptionHandler('dbsException-server-error',
                            dbsExceptionCode['dbsException-server-error'],
                            self.logger.exception, sError)
API to list a dataset's parents in DBS. :param dataset: dataset (Required) :type dataset: str :returns: List of dictionaries containing the following keys (this_dataset, parent_dataset_id, parent_dataset) :rtype: list of dicts
def parse_entry(source, loc, tokens):
    """Build an Entry instance from the parse tokens of one entry.

    Raises UnsupportedEntryType when the named type is unknown or is not
    an Entry subclass.
    """
    type_name = tokens[1].lower()
    entry_cls = structures.TypeRegistry.get_type(type_name)
    if entry_cls is None or not issubclass(entry_cls, structures.Entry):
        raise exceptions.UnsupportedEntryType(
            "%s is not a supported entry type" % type_name)
    entry = entry_cls()
    entry.name = tokens[3]
    # tokens[4:-1] holds key/value pairs separated by ',' tokens.
    for key, value in (tok for tok in tokens[4:-1] if tok != ','):
        entry[key] = value
    return entry
Converts the tokens of an entry into an Entry instance. If no applicable type is available, an UnsupportedEntryType exception is raised.
def send_request(req=None, method=None, requires_response=True):
    """Decorator: call ``req`` to build params, then send them via ZMQ.

    Supports both bare and parametrized use: when ``req`` is None a
    partially-applied decorator is returned.  The wrapped method returns
    the id handed back by ``self.send``.
    """
    if req is None:
        return functools.partial(send_request, method=method,
                                 requires_response=requires_response)

    @functools.wraps(req)
    def wrapper(self, *args, **kwargs):
        return self.send(method, req(self, *args, **kwargs), requires_response)

    # Record which method this wrapper sends, for introspection.
    wrapper._sends = method
    return wrapper
Call function req and then send its results via ZMQ.
def _parse_document(document: Path, system: System = None, profile=EProfile.FULL):
    """Parse a document and merge its sibling .yaml annotations.

    :param document: document path to parse
    :param system: existing system to extend (optional)
    :param profile: parsing profile (defaults to EProfile.FULL)
    :returns: the resulting domain system
    """
    logger.debug('parse document: {0}'.format(document))
    stream = FileStream(str(document), encoding='utf-8')
    system = FileSystem._parse_stream(stream, system, document, profile)
    # NOTE(review): ``document.stripext()`` suggests a path.py-style object,
    # not pathlib.Path as the annotation implies — confirm the annotation.
    FileSystem.merge_annotations(system, document.stripext() + '.yaml')
    return system
Parses a document and returns the resulting domain system :param path: document path to parse :param system: system to be used (optional)
def rcts(self, command, *args, **kwargs):
    """Apply a rolling R function ``command`` and wrap the result in a
    new timeseries of the same class.
    """
    cls = self.__class__
    ts = cls(name=kwargs.pop('name', ''),
             date=kwargs.pop('date', None),
             data=kwargs.pop('data', None))
    kwargs.pop('bycolumn', None)  # consumed here, not passed to R
    ts._ts = self.rc(command, *args, **kwargs)
    return ts
General function for applying a rolling R function to a time series
def update_indel(self, nucmer_snp):
    """Try to extend this indel by one base using an adjacent nucmer SNP.

    Indels are reported one base per line by nucmer.  Returns True when
    the new SNP was merged into this variant; False otherwise (including
    when this variant is not an indel at all).
    """
    new_variant = Variant(nucmer_snp)
    # Only same-type, same-sequence, same-orientation records can merge.
    if self.var_type not in [INS, DEL] \
        or self.var_type != new_variant.var_type \
        or self.qry_name != new_variant.qry_name \
        or self.ref_name != new_variant.ref_name \
        or self.reverse != new_variant.reverse:
        return False
    # Insertion: grows on the query side, ref position stays fixed.
    if self.var_type == INS \
        and self.ref_start == new_variant.ref_start \
        and self.qry_end + 1 == new_variant.qry_start:
        self.qry_base += new_variant.qry_base
        self.qry_end += 1
        return True
    # Deletion: grows on the reference side, query position stays fixed.
    if self.var_type == DEL \
        and self.qry_start == new_variant.qry_start \
        and self.ref_end + 1 == new_variant.ref_start:
        self.ref_base += new_variant.ref_base
        self.ref_end += 1
        return True
    return False
Indels are reported over multiple lines, 1 base insertion or deletion per line. This method extends the current variant by 1 base if it's an indel and adjacent to the new SNP and returns True. If the current variant is a SNP, does nothing and returns False
def save(self, output=''):
    """Save the document to ``output``.

    An empty ``output`` means "next to the source file"; resolution is
    delegated to ``self._get_output_file``.
    """
    self.file = self._get_output_file(output)
    with open(self.file, 'w', encoding='utf-8') as handle:
        self.write(handle)
Save the document. If no output is provided the file will be saved in the same location. Otherwise output can determine a target directory or file.
def _write_incron_lines(user, lines):
    """Commit ``lines`` to a user's incrontab.

    The special user 'system' writes the system tab directly; otherwise
    the lines go through a temp file loaded via the incrontab command.
    """
    if user == 'system':
        ret = {}
        ret['retcode'] = _write_file(_INCRON_SYSTEM_TAB, 'salt', ''.join(lines))
        return ret
    else:
        path = salt.utils.files.mkstemp()
        with salt.utils.files.fopen(path, 'wb') as fp_:
            fp_.writelines(salt.utils.data.encode(lines))
        if __grains__['os_family'] == 'Solaris' and user != "root":
            # On Solaris the target user must own the temp file to load it.
            __salt__['cmd.run']('chown {0} {1}'.format(user, path), python_shell=False)
        ret = __salt__['cmd.run_all'](_get_incron_cmdstr(path), runas=user, python_shell=False)
        os.remove(path)
        return ret
Takes a list of lines to be committed to a user's incrontab and writes it
def collections(self):
    """Return the cached list of collections, or None when no cache is set."""
    if not self.cache:
        return None
    return self.cache.get(self.app.config['COLLECTIONS_CACHE_KEY'])
Get list of collections.
def packet_get_samples_per_frame(data, fs):
    """Gets the number of samples per frame from an Opus packet.

    Raises OpusError on a negative libopus result code.
    NOTE(review): the wrapped C symbol is named *_get_nb_frames* while
    this wrapper claims samples-per-frame — confirm which it returns.
    """
    data_pointer = ctypes.c_char_p(data)
    result = _packet_get_nb_frames(data_pointer, ctypes.c_int(fs))
    if result < 0:
        raise OpusError(result)
    return result
Gets the number of samples per frame from an Opus packet
def dict_hist(item_list, weight_list=None, ordered=False, labels=None):
    """Build a histogram of the items in ``item_list``.

    Args:
        item_list (list): hashable items, usually with duplicates.
        weight_list (list): optional per-item weights (summed instead of
            counting).
        ordered (bool): sort the result by ascending count.
        labels: optional fixed key set, pre-seeded with zero counts.

    Returns:
        dict: item -> count (or summed weight).
    """
    histogram = defaultdict(int) if labels is None else {lbl: 0 for lbl in labels}
    if weight_list is None:
        for item in item_list:
            histogram[item] += 1
    else:
        for item, weight in zip(item_list, weight_list):
            histogram[item] += weight
    if ordered:
        key_order = [k for k, _ in sorted(histogram.items(), key=op.itemgetter(1))]
        histogram = order_dict_by(histogram, key_order)
    return histogram
r""" Builds a histogram of items in item_list Args: item_list (list): list with hashable items (usually containing duplicates) Returns: dict : dictionary where the keys are items in item_list, and the values are the number of times the item appears in item_list. CommandLine: python -m utool.util_dict --test-dict_hist Example: >>> # ENABLE_DOCTEST >>> from utool.util_dict import * # NOQA >>> import utool as ut >>> item_list = [1, 2, 39, 900, 1232, 900, 1232, 2, 2, 2, 900] >>> hist_ = dict_hist(item_list) >>> result = ut.repr2(hist_) >>> print(result) {1: 1, 2: 4, 39: 1, 900: 3, 1232: 2}
def updatePassword(self, user, currentPassword, newPassword):
    """Change the password of a user.

    POSTs the credentials to /api/updatePassword and returns the server
    response.
    """
    return self.__post('/api/updatePassword', data={
        'user': user,
        'currentPassword': currentPassword,
        'newPassword': newPassword
    })
Change the password of a user.
def parseStr(self, html):
    """Parse an HTML string into the DOM tree and indexes.

    Bytes input is decoded with ``self.encoding`` first.
    """
    self.reset()
    markup = html.decode(self.encoding) if isinstance(html, bytes) else html
    self.feed(markup)
parseStr - Parses a string and creates the DOM tree and indexes. @param html <str> - valid HTML
def delete(self, ids):
    """Delete network-ipv4 records by their ids.

    :param ids: identifiers of the network-ipv4's to delete
    :return: None
    """
    url = build_uri_with_ids('api/v3/networkv4/%s/', ids)
    return super(ApiNetworkIPv4, self).delete(url)
Method to delete network-ipv4's by their ids :param ids: Identifiers of network-ipv4's :return: None
def get_nadir(self, channel=0) -> tuple:
    """Coordinates, in units, of the minimum of a channel.

    ``channel`` may be an index (int) or a channel name (str); anything
    else raises TypeError.  Returns one coordinate per axis.
    """
    if isinstance(channel, int):
        channel_index = channel
    elif isinstance(channel, str):
        channel_index = self.channel_names.index(channel)
    else:
        raise TypeError("channel: expected {int, str}, got %s" % type(channel))
    minimum_position = self.channels[channel_index].argmin()
    return tuple(axis[minimum_position] for axis in self._axes)
Get the coordinates, in units, of the minimum in a channel. Parameters ---------- channel : int or str (optional) Channel. Default is 0. Returns ------- generator of numbers Coordinates in units for each axis.
def _AnalyzeKeywords(self, keywords):
    """Extracts a start time from a list of keywords if present.

    Keywords carrying START_TIME_PREFIX set the search start time
    (default: 180 days ago); all others are kept.  When nothing remains,
    "." is used as a match-all keyword.
    """
    start_time = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("180d")
    filtered_keywords = []
    for k in keywords:
        if k.startswith(self.START_TIME_PREFIX):
            try:
                start_time = rdfvalue.RDFDatetime.FromHumanReadable(
                    k[self.START_TIME_PREFIX_LEN:])
            except ValueError:
                pass  # unparsable time: silently keep the default
        else:
            filtered_keywords.append(k)
    if not filtered_keywords:
        filtered_keywords.append(".")
    return start_time, filtered_keywords
Extracts a start time from a list of keywords if present.
def meta_request(func):
    """Decorator: translate API exceptions and serialize the response.

    Pass ``serialize=False`` to the wrapped call to get the raw response.
    """
    def inner(self, resource, *args, **kwargs):
        serialize_response = kwargs.pop('serialize', True)
        try:
            resp = func(self, resource, *args, **kwargs)
        except ApiException as e:
            raise api_exception(e)
        if serialize_response:
            return serialize(resource, resp)
        return resp
    return inner
Handles parsing response structure and translating API Exceptions
def __get_last_update_time():
    """Return the most recent 'first Tuesday' at or before now (the FTP
    site's last update time).
    """
    now = datetime.datetime.utcnow()
    candidate = __get_first_tuesday(now)
    if candidate < now:
        return candidate
    # This month's first Tuesday hasn't happened yet: use last month's.
    previous_month_day = datetime.datetime(now.year, now.month, 1) + datetime.timedelta(days=-1)
    return __get_first_tuesday(previous_month_day)
Returns last FTP site update time
def _jit_pairwise_distances(pos1, pos2): n1 = pos1.shape[0] n2 = pos2.shape[0] D = np.empty((n1, n2)) for i in range(n1): for j in range(n2): D[i, j] = np.sqrt(((pos1[i] - pos2[j])**2).sum()) return D
Optimized function for calculating the distance between each pair of points in positions1 and positions2. Does use python mode as fallback, if a scalar and not an array is given.
def maxDepth(self, currentDepth=0):
    """Compute the depth of the longest branch of the tree."""
    if not any((self.left, self.right)):
        return currentDepth
    branch_depths = [child.maxDepth(currentDepth + 1)
                     for child in (self.left, self.right) if child]
    # Keep the original 0 floor for parity with the accumulator version.
    return max([0] + branch_depths)
Compute the depth of the longest branch of the tree
def add_path(tdict, path):
    """Create or extend an argument tree ``tdict`` from ``path``.

    All items up to ``path[-2]`` become nested dict keys; the final item
    is stored under ``path[-2]``.  Returns the (mutated) ``tdict``.
    """
    node = tdict
    for step in path[:-2]:
        node = node.setdefault(step, {})
    node[path[-2]] = path[-1]
    return tdict
Create or extend an argument tree `tdict` from `path`. :param tdict: a dictionary representing a argument tree :param path: a path list :return: a dictionary Convert a list of items in a 'path' into a nested dict, where the second to last item becomes the key for the final item. The remaining items in the path become keys in the nested dict around that final pair of items. For example, for input values of: tdict={} path = ['assertion', 'subject', 'subject_confirmation', 'method', 'urn:oasis:names:tc:SAML:2.0:cm:bearer'] Returns an output value of: {'assertion': {'subject': {'subject_confirmation': {'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'}}}} Another example, this time with a non-empty tdict input: tdict={'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'}, path=['subject_confirmation_data', 'in_response_to', '_012345'] Returns an output value of: {'subject_confirmation_data': {'in_response_to': '_012345'}, 'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'}
def find_one_by_id(self, _id):
    """Find a single document by id.

    :param str _id: BSON string representation of the id
    :returns: the document as a dictionary (delivered via tornado's Return)
    """
    document = (yield self.collection.find_one({"_id": ObjectId(_id)}))
    raise Return(self._obj_cursor_to_dictionary(document))
Find a single document by id :param str _id: BSON string representation of the Id :return: a single object :rtype: dict
def dataReceived(self, data):
    """Callback for incoming bytes: buffer them on the connection and
    let it process what has arrived so far.
    """
    connection = self.connection
    connection._iobuf.write(data)
    connection.handle_read()
Callback function that is called when data has been received on the connection. Reaches back to the Connection object and queues the data for processing.
def suggest(self, *replies):
    """Add suggestion chips hinting at follow-up replies.

    Returns self so calls can be chained.
    """
    chips = [{"title": reply} for reply in replies]
    self._messages.append({
        "platform": "ACTIONS_ON_GOOGLE",
        "suggestions": {"suggestions": chips},
    })
    return self
Use suggestion chips to hint at responses to continue or pivot the conversation
def generate_look_up_table():
    """Generate the 256-entry CRC-16 look-up table (reflected poly 0xA001).

    :return: list of 256 ints
    """
    polynomial = 0xA001
    table = []
    for index in range(256):
        data = index << 1
        crc = 0
        for _ in range(8):
            data >>= 1
            if (data ^ crc) & 0x0001:
                crc = (crc >> 1) ^ polynomial
            else:
                crc >>= 1
        table.append(crc)
    return table
Generate look up table. :return: List
def get_hash(name, password=None):
    """Return the PEM body of a certificate in the keychain (or a p12 file).

    name: certificate friendly name, or the path of a .p12 file.
    password: required only for p12 files (NOTE: it ends up in logs).
    Returns False when no certificate block is found in the output.
    """
    if '.p12' in name[-4:]:
        cmd = 'openssl pkcs12 -in {0} -passin pass:{1} -passout pass:{1}'.format(name, password)
    else:
        cmd = 'security find-certificate -c "{0}" -m -p'.format(name)
    out = __salt__['cmd.run'](cmd)
    matches = re.search('-----BEGIN CERTIFICATE-----(.*)-----END CERTIFICATE-----',
                        out, re.DOTALL | re.MULTILINE)
    if matches:
        return matches.group(1)
    else:
        return False
Returns the hash of a certificate in the keychain. name The name of the certificate (which you can get from keychain.get_friendly_name) or the location of a p12 file. password The password that is used in the certificate. Only required if your passing a p12 file. Note: This will be outputted to logs CLI Example: .. code-block:: bash salt '*' keychain.get_hash /tmp/test.p12 test123
def url_to_tile(url):
    """Extract (tile name, date, AWS index) from a tile url on AWS.

    :param url: tile url ending in .../<utm>/<lat>/<sq>/<y>/<m>/<d>/<idx>
    :return: (str, str, int) uniquely identifying the tile on AWS
    """
    parts = url.strip('/').split('/')
    tile_name = ''.join(parts[-7:-4])
    date = '-'.join(parts[-4:-1])
    return tile_name, date, int(parts[-1])
Extracts tile name, date and AWS index from tile url on AWS. :param url: class input parameter 'metafiles' :type url: str :return: Name of tile, date and AWS index which uniquely identifies tile on AWS :rtype: (str, str, int)
def remove_xml_element_string(name, content):
    """Remove XML elements named ``name`` from an XML string.

    Registers the Salesforce metadata namespace as the default namespace
    so the output stays unprefixed.  Returns the cleaned XML as bytes.
    """
    ET.register_namespace("", "http://soap.sforce.com/2006/04/metadata")
    tree = ET.fromstring(content)
    tree = remove_xml_element(name, tree)
    clean_content = ET.tostring(tree, encoding=UTF8)
    return clean_content
Remove XML elements from a string
def get_processor_description(self, cpu_id):
    """Query the model string of a specified host CPU.

    :param cpu_id: identifier of the CPU (int); the implementation may
        not return the description for this exact CPU
    :returns: model string; empty when unknown or ``cpu_id`` is invalid
    :raises TypeError: when ``cpu_id`` is not a baseinteger
    """
    if not isinstance(cpu_id, baseinteger):
        raise TypeError("cpu_id can only be an instance of type baseinteger")
    description = self._call("getProcessorDescription", in_p=[cpu_id])
    return description
Query the model string of a specified host CPU. in cpu_id of type int Identifier of the CPU. The current implementation might not necessarily return the description for this exact CPU. return description of type str Model string. An empty string is returned if value is not known or @a cpuId is invalid.
def open_gif(self, filename):
    """Open a gif file for writing frames.

    Parameters
    ----------
    filename : str
        Filename of the gif to open; must end in gif.  Relative paths
        are resolved against vtki.FIGURE_PATH when that is set.
    """
    # NOTE(review): this check only looks at the last three characters,
    # so "foo.GIF" is rejected and "foogif" accepted — consider
    # filename.lower().endswith('.gif').
    if filename[-3:] != 'gif':
        raise Exception('Unsupported filetype. Must end in .gif')
    if isinstance(vtki.FIGURE_PATH, str) and not os.path.isabs(filename):
        filename = os.path.join(vtki.FIGURE_PATH, filename)
    self._gif_filename = os.path.abspath(filename)
    self.mwriter = imageio.get_writer(filename, mode='I')
Open a gif file. Parameters ---------- filename : str Filename of the gif to open. Filename must end in gif.
def set_func(self, name, func):
    """Set the processing function to use for this node.

    Args:
        name (str): stored for reference, e.g. if the node is
            serialized later.
        func (callable): called to process this node's inputs; should
            return a list of readings pushed to the output stream.
    """
    self.func_name = name
    self.func = func
Set the processing function to use for this node. Args: name (str): The name of the function to use. This is just stored for reference in case we need to serialize the node later. func (callable): A function that is called to process inputs for this node. It should have the following signature: callable(input1_walker, input2_walker, ...) It should return a list of IOTileReadings that are then pushed into the node's output stream
def fit(self, X):
    """Fit the model to (n, 2) data: estimate Kendall's tau, derive and
    validate theta.  Updates the instance in place; returns None.
    """
    first, second = self.split_matrix(X)
    self.tau = stats.kendalltau(first, second)[0]
    self.theta = self.compute_theta()
    self.check_theta()
Fit a model to the data updating the parameters. Args: X: `np.ndarray` of shape (,2). Return: None
def set_context_menu(self, context_menu):
    """Set the context menu of the tree view.

    :param context_menu: QMenu to attach; its actions are registered on
        this view so shortcuts keep working.
    """
    self.context_menu = context_menu
    context_menu.tree_view = self
    context_menu.init_actions()
    for menu_action in context_menu.actions():
        self.addAction(menu_action)
Sets the context menu of the tree view. :param context_menu: QMenu
def _getSyntaxByLanguageName(self, syntaxName): xmlFileName = self._syntaxNameToXmlFileName[syntaxName] return self._getSyntaxByXmlFileName(xmlFileName)
Get syntax by its name. Name is defined in the xml file
def get_voms_proxy_user():
    """Return the owner (the CN field) of the voms proxy.

    Raises an Exception when the proxy identity contains no CN field.
    """
    out = _voms_proxy_info(["--identity"])[1].strip()
    # BUG FIX: the original wrapped this in a bare ``except:`` which also
    # swallowed KeyboardInterrupt/SystemExit; test the match explicitly.
    match = re.match(r".*\/CN\=([^\/]+).*", out)
    if match is None:
        raise Exception("no valid identity found in voms proxy: {}".format(out))
    return match.group(1)
Returns the owner of the voms proxy.
def EndVector(self, vectorNumElems):
    """EndVector writes data necessary to finish vector construction.

    Leaves the nested construction state, writes the element count, and
    returns the offset of the finished vector.
    """
    self.assertNested()
    # Leaving the nested (vector) construction state.
    self.nested = False
    self.PlaceUOffsetT(vectorNumElems)
    return self.Offset()
EndVector writes data necessary to finish vector construction.
def page_not_found(request, template_name="errors/404.html"):
    """Mimics Django's 404 handler but with a different template path."""
    context = {
        "STATIC_URL": settings.STATIC_URL,
        "request_path": request.path,
    }
    t = get_template(template_name)
    return HttpResponseNotFound(t.render(context, request))
Mimics Django's 404 handler but with a different template path.
def validate_unwrap(self, value):
    """Validate a wrapped KVField value.

    ``value`` must be a list of dicts, each with ``k`` and ``v`` entries
    that the field's key and value types can themselves unwrap.  Returns
    True when valid; otherwise delegates failure reporting to
    ``self._fail_validation``.
    """
    if not isinstance(value, list):
        self._fail_validation_type(value, list)
    for value_dict in value:
        if not isinstance(value_dict, dict):
            cause = BadValueException('', value_dict, 'Values in a KVField list must be dicts')
            self._fail_validation(value, 'Values in a KVField list must be dicts', cause=cause)
        k = value_dict.get('k')
        v = value_dict.get('v')
        if k is None:
            self._fail_validation(value, 'Value had None for a key')
        try:
            self.key_type.validate_unwrap(k)
        except BadValueException as bve:
            self._fail_validation(value, 'Bad value for KVField key %s' % k, cause=bve)
        try:
            self.value_type.validate_unwrap(v)
        except BadValueException as bve:
            # BUG FIX: the message misspelled the field name as "KFVield".
            self._fail_validation(value, 'Bad value for KVField value %s' % k, cause=bve)
    return True
Expects a list of dictionaries with ``k`` and ``v`` set to the keys and values that will be unwrapped into the output python dictionary.
def reverse_hash(hash, hex_format=True):
    """Reverse the byte order of a hash.

    :param hash: hex string (default), or raw bytes when
        ``hex_format=False``
    :returns: the byte-reversed hash as a hex string
    """
    if not hex_format:
        # BUG FIX: hexlify() returns bytes, so joining its slices with a
        # str separator raised TypeError; decode to str first.
        hash = hexlify(hash).decode('ascii')
    return "".join(reversed([hash[i:i+2] for i in range(0, len(hash), 2)]))
hash is in hex or binary format
def pipeline_counter(self):
    """Get pipeline counter of current job instance.

    Depending on how the job was instantiated the counter may live in
    the job data itself, on the parent pipeline, or on the stage data —
    each location is tried in that order.

    :return: pipeline counter.
    """
    if 'pipeline_counter' in self.data and self.data.pipeline_counter:
        return self.data.get('pipeline_counter')
    elif self.stage.pipeline is not None:
        return self.stage.pipeline.data.counter
    else:
        return self.stage.data.pipeline_counter
Get pipeline counter of current job instance. Because instantiating job instance could be performed in different ways and those return different results, we have to check where from to get counter of the pipeline. :return: pipeline counter.
def _get_arch():
    """Determine the current processor architecture.

    Falls back to GetSystemInfo() when the native call is unavailable,
    and returns ARCH_UNKNOWN for architecture ids missing from
    ``_arch_map``.

    @rtype: str
    """
    try:
        si = GetNativeSystemInfo()
    except Exception:
        si = GetSystemInfo()
    try:
        return _arch_map[si.id.w.wProcessorArchitecture]
    except KeyError:
        return ARCH_UNKNOWN
Determines the current processor architecture. @rtype: str @return: On error, returns: - L{ARCH_UNKNOWN} (C{"unknown"}) meaning the architecture could not be detected or is not known to WinAppDbg. On success, returns one of the following values: - L{ARCH_I386} (C{"i386"}) for Intel 32-bit x86 processor or compatible. - L{ARCH_AMD64} (C{"amd64"}) for Intel 64-bit x86_64 processor or compatible. May also return one of the following values if you get both Python and WinAppDbg to work in such machines... let me know if you do! :) - L{ARCH_MIPS} (C{"mips"}) for MIPS compatible processors. - L{ARCH_ALPHA} (C{"alpha"}) for Alpha processors. - L{ARCH_PPC} (C{"ppc"}) for PowerPC compatible processors. - L{ARCH_SHX} (C{"shx"}) for Hitachi SH processors. - L{ARCH_ARM} (C{"arm"}) for ARM compatible processors. - L{ARCH_IA64} (C{"ia64"}) for Intel Itanium processor or compatible. - L{ARCH_ALPHA64} (C{"alpha64"}) for Alpha64 processors. - L{ARCH_MSIL} (C{"msil"}) for the .NET virtual machine. - L{ARCH_SPARC} (C{"sparc"}) for Sun Sparc processors. Probably IronPython returns C{ARCH_MSIL} but I haven't tried it. Python on Windows CE and Windows Mobile should return C{ARCH_ARM}. Python on Solaris using Wine would return C{ARCH_SPARC}. Python in an Itanium machine should return C{ARCH_IA64} both on Wine and proper Windows. All other values should only be returned on Linux using Wine.
def _execute_single_level_task(self):
    """Execute a single-level task: MFCC extraction, head/tail trimming,
    alignment, and sync map creation.  Any failure is routed to
    ``self._step_failure`` instead of propagating.
    """
    self.log(u"Executing single level task...")
    try:
        # extract MFCCs from the real audio file
        self._step_begin(u"extract MFCC real wave")
        real_wave_mfcc = self._extract_mfcc(
            file_path=self.task.audio_file_path_absolute,
            file_format=None,
        )
        self._step_end()
        # determine head/process/tail lengths and mark them on the wave
        self._step_begin(u"compute head tail")
        (head_length, process_length, tail_length) = self._compute_head_process_tail(real_wave_mfcc)
        real_wave_mfcc.set_head_middle_tail(head_length, process_length, tail_length)
        self._step_end()
        self._set_synthesizer()
        sync_root = Tree()
        self._execute_inner(
            real_wave_mfcc,
            self.task.text_file,
            sync_root=sync_root,
            force_aba_auto=False,
            log=True,
            leaf_level=True
        )
        self._clear_cache_synthesizer()
        self._step_begin(u"create sync map")
        self._create_sync_map(sync_root=sync_root)
        self._step_end()
        self._step_total()
        self.log(u"Executing single level task... done")
    except Exception as exc:
        self._step_failure(exc)
Execute a single-level task
def edge_index(self):
    """A map to look up the index of an edge."""
    return {edge: position for position, edge in enumerate(self.edges)}
A map to look up the index of a edge
def listen(self):
    """Consume messages from the queue until the event loop ends.

    Timeouts are ignored so the loop keeps polling the connection.
    """
    with Consumer(self.connection, queues=self.queue, on_message=self.on_message,
                  auto_declare=False):
        for _ in eventloop(self.connection, timeout=1, ignore_timeouts=True):
            pass
Listens to messages
def children(self):
    """Return all of the children elements as a list (possibly empty)."""
    content = self.content
    if isinstance(content, list):
        return content
    # A single Element child is wrapped; anything else has no children.
    if isinstance(content, Element):
        return [content]
    return []
Returns all of the children elements.
def get_buckets(self, bucket_type=None, timeout=None):
    """Serialize a (non-streaming) bucket listing request and
    deserialize the response; returns the bucket list.
    """
    msg_code = riak.pb.messages.MSG_CODE_LIST_BUCKETS_REQ
    codec = self._get_codec(msg_code)
    msg = codec.encode_get_buckets(bucket_type, timeout, streaming=False)
    resp_code, resp = self._request(msg, codec)
    return resp.buckets
Serialize bucket listing request and deserialize response
def load_from_file(self, fname=None):
    """Update rcParams from user-defined settings in a yaml file.

    Parameters
    ----------
    fname: str
        Path to the yaml configuration file.  If None, the result of
        ``psyplot_fname()`` is used.  A missing file is silently ignored.
    """
    fname = fname or psyplot_fname()
    if fname and os.path.exists(fname):
        with open(fname) as f:
            # NOTE(review): yaml.load without an explicit Loader is unsafe
            # on untrusted files and deprecated in PyYAML >= 5.1.
            d = yaml.load(f)
        self.update(d)
        # Merge user-defined plotter settings into existing project.plotters.
        if (d.get('project.plotters.user') and 'project.plotters' in self):
            self['project.plotters'].update(d['project.plotters.user'])
Update rcParams from user-defined settings This function updates the instance with what is found in `fname` Parameters ---------- fname: str Path to the yaml configuration file. Possible keys of the dictionary are defined by :data:`config.rcsetup.defaultParams`. If None, the :func:`config.rcsetup.psyplot_fname` function is used. See Also -------- dump_to_file, psyplot_fname
def similar_email(anon, obj, field, val): return val if 'betterworks.com' in val else '@'.join([anon.faker.user_name(field=field), val.split('@')[-1]])
Generate a random email address using the same domain.
def vector_is_zero(vector_in, tol=10e-8):
    """Checks if the input vector is a zero vector.

    :param vector_in: input vector
    :type vector_in: list, tuple
    :param tol: tolerance value
    :type tol: float
    :return: True if every component is smaller than ``tol`` in absolute
        value (an empty vector counts as zero), False otherwise
    :rtype: bool
    :raises TypeError: if the input is not a list or a tuple
    """
    if not isinstance(vector_in, (list, tuple)):
        raise TypeError("Input vector must be a list or a tuple")
    # all() over a generator replaces the manual flag list; all() of an
    # empty sequence is True, matching the original behavior.
    return all(abs(component) < tol for component in vector_in)
Checks if the input vector is a zero vector. :param vector_in: input vector :type vector_in: list, tuple :param tol: tolerance value :type tol: float :return: True if the input vector is zero, False otherwise :rtype: bool
def rws_call(ctx, method, default_attr=None):
    """Make a request to RWS and echo (or write) the result.

    :param ctx: click context; expects ``RWS`` (client), ``RAW`` (bool)
        and ``OUTPUT`` (optional writable file) in ``ctx.obj``
    :param method: request object passed to ``RWS.send_request``
    :param default_attr: if set (and not in RAW mode), render the
        response as one line per item using this attribute
    """
    try:
        response = ctx.obj['RWS'].send_request(method)
        if ctx.obj['RAW']:
            # Raw mode: emit the unparsed response text.
            result = ctx.obj['RWS'].last_result.text
        elif default_attr is not None:
            # Render the chosen attribute of each item, one per line.
            result = ""
            for item in response:
                result = result + item.__dict__[default_attr] + "\n"
        else:
            result = ctx.obj['RWS'].last_result.text
        if ctx.obj['OUTPUT']:
            ctx.obj['OUTPUT'].write(result.encode('utf-8'))
        else:
            click.echo(result)
    except RWSException as e:
        # Errors are reported to the user rather than propagated.
        click.echo(str(e))
Make request to RWS
def product_url(self, product):
    """Return a human-friendly URL for this product.

    :param product: str, eg. "ceph"
    :returns: str, URL
    """
    return posixpath.join(self.url, 'product/%s' % product)
Return a human-friendly URL for this product. :param product: str, eg. "ceph" :returns: str, URL
def read_string(buff, byteorder='big'):
    """Read a length-prefixed UTF-8 string from a file-like object."""
    # The string is preceded by its byte length as an unsigned short.
    size = read_numeric(USHORT, buff, byteorder)
    data = buff.read(size)
    return data.decode('utf-8')
Read a string from a file-like object.
def _validate_depedencies(batches):
    """Validate the transaction dependencies for all given batches.

    All batches are expected to be executed for the genesis block, so
    every dependency must be satisfied by a transaction that appears
    earlier in the sequence.

    Raises:
        CliException: if a transaction depends on an unseen id.
    """
    seen_ids = set()
    for batch in batches:
        for txn in batch.transactions:
            header = TransactionHeader()
            header.ParseFromString(txn.header)
            # Renamed from `id` to avoid shadowing the builtin.
            unsatisfied_deps = [
                dep for dep in header.dependencies
                if dep not in seen_ids
            ]
            if unsatisfied_deps:
                raise CliException(
                    'Unsatisfied dependency in given transactions:'
                    ' {}'.format(unsatisfied_deps))
            seen_ids.add(txn.header_signature)
Validates the transaction dependencies for the transactions contained within the sequence of batches. Given that all the batches are expected to be executed for the genesis block, it is assumed that any dependent transaction will precede the depending transaction.
def info(self, msg):
    """Log an info-level message through the full pipeline.

    Runs registered actions, applies filters (which may rewrite the
    message), then processes and sends the filtered message.
    """
    level = 'info'
    self._execActions(level, msg)
    filtered = self._execFilters(level, msg)
    self._processMsg(level, filtered)
    self._sendMsg(level, filtered)
Log Info Messages
def check(cls, status):
    """Raise ``cls.error`` when ``status`` matches ``cls.trigger``.

    Args:
        status (int, enum): A protobuf enum response status to check.

    Raises:
        AssertionError: If trigger or error were not set.
        _ApiError: If the statuses match. Do not catch. Will be caught
            automatically and sent back to the client.
    """
    assert cls.trigger is not None, 'Invalid ErrorTrap, trigger not set'
    assert cls.error is not None, 'Invalid ErrorTrap, error not set'
    # Guard clause: anything other than the trigger passes through.
    if status != cls.trigger:
        return
    raise cls.error()
Checks if a status enum matches the trigger originally set, and if so, raises the appropriate error. Args: status (int, enum): A protobuf enum response status to check. Raises: AssertionError: If trigger or error were not set. _ApiError: If the statuses don't match. Do not catch. Will be caught automatically and sent back to the client.
def set_desired_state(self, state):
    """Update the desired state of a unit.

    Args:
        state (str): The desired state for the unit, must be one of ``_STATES``

    Returns:
        str: The updated state

    Raises:
        fleet.v1.errors.APIError: Fleet returned a response code >= 400
        ValueError: An invalid value for ``state`` was provided
    """
    if state not in self._STATES:
        raise ValueError(
            'state must be one of: {0}'.format(
                self._STATES
            ))
    # Record the change locally first.
    self._data['desiredState'] = state
    # For live units (backed by the API), push the change to the
    # server and refresh our data from its response.
    if self._is_live():
        self._update('_data', self._client.set_unit_desired_state(self.name, self.desiredState))
    return self._data['desiredState']
Update the desired state of a unit. Args: state (str): The desired state for the unit, must be one of ``_STATES`` Returns: str: The updated state Raises: fleet.v1.errors.APIError: Fleet returned a response code >= 400 ValueError: An invalid value for ``state`` was provided
def visit_statements(self, nodes):
    """Generate the adjoint of a series of statements.

    Visits each node, collecting primal statements in forward order and
    adjoint statements in reverse order (the adjoint of a sequence runs
    backwards), dropping any falsy entries.

    Args:
        nodes: iterable of statement nodes.

    Returns:
        Tuple of (primal statements, adjoint statements), both lists.
    """
    primals, adjoints = [], collections.deque()
    for node in nodes:
        primal, adjoint = self.visit(node)
        # Normalize single statements to lists.
        if not isinstance(primal, list):
            primal = [primal]
        if not isinstance(adjoint, list):
            adjoint = [adjoint]
        primals.extend(filter(None, primal))
        # extendleft reverses its argument, so feed it the reversed
        # adjoint to keep the group's internal order while prepending
        # the whole group.
        adjoints.extendleft(filter(None, adjoint[::-1]))
    return primals, list(adjoints)
Generate the adjoint of a series of statements.
def _check_user(user, group):
    """Check that the named user and group are present on the minion.

    Returns an error string naming any missing account, or an empty
    string when nothing is missing.
    """
    problems = []
    if user and __salt__['file.user_to_uid'](user) == '':
        problems.append('User {0} is not available '.format(user))
    if group and __salt__['file.group_to_gid'](group) == '':
        problems.append('Group {0} is not available'.format(group))
    return ''.join(problems)
Checks if the named user and group are present on the minion
def getEdges(self, edges, inEdges=True, outEdges=True, rawResults=False):
    """Return in, out, or both edges linked to self from collection 'edges'.

    With rawResults, the raw ArangoDB results are returned as fetched;
    otherwise a list of Edge objects is returned.
    """
    try:
        return edges.getEdges(self, inEdges, outEdges, rawResults)
    except AttributeError:
        # Anything without a getEdges method is not a valid Edges object.
        raise AttributeError("%s does not seem to be a valid Edges object" % edges)
Returns in, out, or both edges linked to self belonging to the collection 'edges'. If rawResults is True, the raw ArangoDB results are returned as fetched; if False, a list of Edge objects is returned.
def heartbeat(self):
    """Renew the job's heartbeat, updating its user data server-side.

    Returns:
        float: the new expiration timestamp.

    Raises:
        LostLockException: if the server refuses the heartbeat.
    """
    logger.debug('Heartbeating %s (ttl = %s)', self.jid, self.ttl)
    try:
        # The server returns the new expiration timestamp (falsy
        # responses are coerced to 0).
        self.expires_at = float(self.client('heartbeat', self.jid, self.client.worker_name, json.dumps(self.data)) or 0)
    except QlessException:
        raise LostLockException(self.jid)
    logger.debug('Heartbeated %s (ttl = %s)', self.jid, self.ttl)
    return self.expires_at
Renew the heartbeat, if possible, and optionally update the job's user data.
def send_voice(self, voice, **options):
    """Send an OPUS-encoded .ogg audio file to this chat.

    :param voice: Object containing the audio data
    :param options: Additional sendVoice options
        (see https://core.telegram.org/bots/api#sendvoice)
    """
    return self.bot.api_call(
        "sendVoice", chat_id=str(self.id), voice=voice, **options
    )
Send an OPUS-encoded .ogg audio file. :param voice: Object containing the audio data :param options: Additional sendVoice options (see https://core.telegram.org/bots/api#sendvoice) :Example: >>> with open("voice.ogg", "rb") as f: >>> await chat.send_voice(f)
def get_resource(self, request, filename):
    """Return a static resource from the shared folder.

    Only the basename of the requested filename is used, which prevents
    path traversal outside the package's 'shared' directory.
    """
    # basename() strips any directory components from the request.
    filename = join("shared", basename(filename))
    try:
        data = pkgutil.get_data(__package__, filename)
    except OSError:
        # Missing resource: fall through to the 404 below.
        data = None
    if data is not None:
        mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream"
        return Response(data, mimetype=mimetype)
    return Response("Not Found", status=404)
Return a static resource from the shared folder.
def get_permission_groups(self, username):
    """Return the names of the groups the given user is a member of.

    Implements the `IPermissionGroupProvider` API: fetches (and caches)
    the organization's teams from GitHub and maps them per user.
    Returns an empty list when the plugin is not fully configured or
    the username does not carry the expected prefix.
    """
    # Bail out early if the GitHub connection is not configured.
    if not self.organization or not self.username or not self.access_token:
        return []
    # Only handle users created by this provider (matching prefix).
    elif (self.username_prefix and
          not username.startswith(self.username_prefix)):
        return []
    data = self._fetch_groups()
    if not data:
        self.log.error("No cached groups from GitHub available")
        return []
    else:
        # Strip the prefix to recover the raw GitHub login.
        return data.get(username[len(self.username_prefix):], [])
Return a list of names of the groups that the user with the specified name is a member of. Implements an `IPermissionGroupProvider` API. This specific implementation connects to GitHub with a dedicated user, fetches and caches the teams and their users configured at GitHub and converts the data into a format usable for easy access by username.
def mesh_axis_to_cumprod(self, tensor_shape):
    """For each mesh axis, give the product of previous tensor axes.

    Args:
        tensor_shape: Shape.

    Returns:
        list with length self.ndims where each element is an integer
        (the cumulative product preceding the tensor axis assigned to
        that mesh axis) or None for unassigned mesh axes.
    """
    tensor_layout = self.tensor_layout(tensor_shape)
    # Map each mesh axis to its tensor axis (None if unassigned).
    ma2ta = tensor_layout.mesh_axis_to_tensor_axis(self.ndims)
    # Cumulative products of the tensor dimensions.
    ta2cumprod = tensor_shape.cumprod
    return [None if ta is None else ta2cumprod[ta] for ta in ma2ta]
For each mesh axis, give the product of previous tensor axes. Args: tensor_shape: Shape. Returns: list with length self.ndims where each element is an integer or None.
def update(cwd, targets=None, user=None, username=None, password=None, *opts):
    """Update files or directories from the remote Subversion repository.

    cwd
        The path to the Subversion repository
    targets : None
        files and directories to pass to the command as arguments
        (svn defaults to '.')
    user : None
        Run svn as a user other than what the minion runs as
    username : None
        Connect to the Subversion server as another user
    password : None
        Connect to the Subversion server with this password
    opts
        extra raw options forwarded to svn

    CLI Example: salt '*' svn.update /path/to/repo
    """
    if targets:
        # shlex-split so quoted targets containing spaces survive.
        opts += tuple(salt.utils.args.shlex_split(targets))
    return _run_svn('update', cwd, user, username, password, opts)
Update the current directory, files, or directories from the remote Subversion repository cwd The path to the Subversion repository targets : None files and directories to pass to the command as arguments Default: svn uses '.' user : None Run svn as a user other than what the minion runs as password : None Connect to the Subversion server with this password .. versionadded:: 0.17.0 username : None Connect to the Subversion server as another user CLI Example: .. code-block:: bash salt '*' svn.update /path/to/repo
def compute_logarithmic_scale(min_, max_, min_scale, max_scale):
    """Compute tick positions for a logarithmic scale over [min_, max_].

    Returns an empty list when the range contains non-positive values
    or spans at most one order of magnitude.
    """
    if max_ <= 0 or min_ <= 0:
        return []
    min_order = int(floor(log10(min_)))
    max_order = int(ceil(log10(max_)))
    positions = []
    amplitude = max_order - min_order
    if amplitude <= 1:
        return []
    detail = 10.
    # Tune the per-decade tick density so the total tick count lands
    # between the requested min_scale/max_scale bounds (the 5 and 3
    # factors look like empirically chosen slack — confirm).
    while amplitude * detail < min_scale * 5:
        detail *= 2
    while amplitude * detail > max_scale * 3:
        detail /= 2
    for order in range(min_order, max_order + 1):
        for i in range(int(detail)):
            # Candidate tick within this decade; i == 0 yields 10**order.
            tick = (10 * i / detail or 1) * 10**order
            tick = round_to_scale(tick, tick)
            if min_ <= tick <= max_ and tick not in positions:
                positions.append(tick)
    return positions
Compute an optimal tick scale for a logarithmic axis.
def create_view(self, request):
    """Instantiate the class-based 'create' view for this model admin
    and call it with the given request."""
    view = self.create_view_class.as_view(model_admin=self)
    return view(request)
Instantiates a class-based view to provide 'creation' functionality for the assigned model, or redirect to Wagtail's create view if the assigned model extends 'Page'. The view class used can be overridden by changing the 'create_view_class' attribute.
def _validate_name(name):
    """Check whether the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    """
    name = six.text_type(name)
    # 3-48 chars; ASCII alphanumerics, dashes and underscores only,
    # beginning and ending with an alphanumeric.
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')
    valid = 3 <= len(name) <= 48 and label_re.match(name) is not None
    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
Checks if the provided name fits Linode's labeling parameters. .. versionadded:: 2015.5.6 name The VM name to validate
def _initialize(self, boto_session, sagemaker_client, sagemaker_runtime_client):
    """Initialize this Local SageMaker Session.

    Sets up the boto session (creating a default one if none is given)
    and substitutes local, in-process stand-ins for the SageMaker
    clients. Note the sagemaker_client/sagemaker_runtime_client
    arguments are not used here.
    """
    self.boto_session = boto_session or boto3.Session()
    self._region_name = self.boto_session.region_name

    if self._region_name is None:
        raise ValueError('Must setup local AWS configuration with a region supported by SageMaker.')

    # Local mode replaces the real AWS clients entirely.
    self.sagemaker_client = LocalSagemakerClient(self)
    self.sagemaker_runtime_client = LocalSagemakerRuntimeClient(self.config)
    self.local_mode = True
Initialize this Local SageMaker Session.
def _installer(self, package_list, install_string=None):
    """Install operating system packages for the system.

    :param package_list: ``list`` of package names
    :param install_string: ``str`` optional full install command that
        overrides the distro default
    :raises IOError: if the shell command reports failure
    """
    packages = ' '.join(package_list)
    if install_string is None:
        # Use the distro-specific install command template.
        self.install_string = self.install_process[self.distro] % packages
    else:
        # NOTE(review): an explicit install_string is used verbatim and
        # the joined package list is ignored in this branch — confirm
        # that is intended.
        self.install_string = install_string
    output, outcome = self.shell.run_command(command=self.install_string)
    if outcome is False:
        raise IOError(output)
Install operating system packages for the system.

:param package_list: ``list``
:param install_string: ``str``
def libvlc_audio_output_list_release(p_list):
    """Free the list of available audio output modules.

    @param p_list: list with audio outputs for release.
    """
    # Lazily resolve and cache the underlying C function on first use.
    f = _Cfunctions.get('libvlc_audio_output_list_release', None) or \
        _Cfunction('libvlc_audio_output_list_release', ((1,),), None,
                    None, ctypes.POINTER(AudioOutput))
    return f(p_list)
Frees the list of available audio output modules. @param p_list: list with audio outputs for release.
def restore_sampler(fname):
    """Create a new sampler from an hdf5 database.

    :param fname: path to the hdf5 file holding a pickled sampler under
        the ``__sampler__`` node.
    :return: the unpickled sampler object.
    """
    import pickle
    hf = tables.open_file(fname)
    try:
        fnode = hf.root.__sampler__
        # NOTE(review): unpickling is unsafe on untrusted files; only
        # restore samplers from trusted sources.
        sampler = pickle.load(fnode)
    finally:
        # The original leaked the file handle; close it once loading is
        # done (assumes the sampler keeps no live refs into the file —
        # confirm).
        hf.close()
    return sampler
Creates a new sampler from an hdf5 database.
def check_is_injectable(func):
    """Decorator that checks whether the "inj_name" keyword argument to
    the wrapped function matches a registered Orca injectable, aborting
    the request with a 404 otherwise."""
    @wraps(func)
    def wrapper(**kwargs):
        name = kwargs['inj_name']
        if not orca.is_injectable(name):
            # Unknown injectable: reject the request.
            abort(404)
        return func(**kwargs)
    return wrapper
Decorator that will check whether the "inj_name" keyword argument to the wrapped function matches a registered Orca injectable.
def rdfs_classes(rdf):
    """Perform RDFS subclass inference in place.

    Marks every resource typed with a subclass with all of that class's
    (transitive) superclasses as additional rdf:type triples.
    """
    # Map each class to the set of all of its superclasses.
    upperclasses = {}
    for s, o in rdf.subject_objects(RDFS.subClassOf):
        upperclasses.setdefault(s, set())
        for uc in rdf.transitive_objects(s, RDFS.subClassOf):
            if uc != s:
                upperclasses[s].add(uc)
    # Add the inferred superclass types to every instance.
    for s, ucs in upperclasses.items():
        logging.debug("setting superclass types: %s -> %s", s, str(ucs))
        for res in rdf.subjects(RDF.type, s):
            for uc in ucs:
                rdf.add((res, RDF.type, uc))
Perform RDFS subclass inference. Mark all resources with a subclass type with the upper class.
def _set_available_combinations(self):
    """Generate all connected-output combinations and set the max
    display width while iterating.

    Honors ``self.output_combinations`` as a '|'-separated whitelist,
    which also fixes the ordering of the resulting deque.
    """
    available = set()
    combinations_map = {}
    whitelist = None
    if self.output_combinations:
        whitelist = self.output_combinations.split("|")

    self.max_width = 0
    # Every non-empty subset of the connected outputs...
    for output in range(len(self.layout["connected"])):
        for comb in combinations(self.layout["connected"], output + 1):
            # ...in both display modes.
            for mode in ["clone", "extend"]:
                string = self._get_string_and_set_width(comb, mode)
                if whitelist and string not in whitelist:
                    continue
                if len(comb) == 1:
                    # Mode is meaningless for a single output.
                    combinations_map[string] = (comb, None)
                else:
                    combinations_map[string] = (comb, mode)
                available.add(string)
    if whitelist:
        # Keep the whitelist's order (reversed) instead of set order.
        available = reversed([comb for comb in whitelist if comb in available])

    self.available_combinations = deque(available)
    self.combinations_map = combinations_map
Generate all connected outputs combinations and set the max display width while iterating.
def available_streams():
    """Print a short, sorted list of the available DB streams."""
    sds = kp.db.StreamDS()
    print("Available streams: ")
    print(', '.join(sorted(sds.streams)))
Show a short list of available streams.
def resolve_model(self, model):
    """Resolve a model given a name or dict with a `class` entry.

    :raises ValueError: model specification is wrong or does not exist
    """
    if not model:
        raise ValueError('Unsupported model specifications')
    if isinstance(model, basestring):  # NOTE: py2-style string check
        classname = model
    elif isinstance(model, dict) and 'class' in model:
        classname = model['class']
    else:
        raise ValueError('Unsupported model specifications')
    try:
        # Look the class name up in the document registry.
        return get_document(classname)
    except self.NotRegistered:
        message = 'Model "{0}" does not exist'.format(classname)
        raise ValueError(message)
Resolve a model given a name or dict with `class` entry. :raises ValueError: model specification is wrong or does not exists
def generate_rst_docs(directory, classes, missing_objects=None):
    """Generate documentation for tri.declarative APIs.

    :param directory: directory to write the .rst files into
    :param classes: list of classes to generate documentation for
    :param missing_objects: tuple of objects to count as missing markers, if applicable
    """
    # NOTE(review): paths are built by plain concatenation, so the
    # filenames from _generate_rst_docs are expected to include a
    # leading separator (or `directory` a trailing one) — confirm.
    for filename, doc in _generate_rst_docs(classes=classes,
                                            missing_objects=missing_objects):
        with open(directory + filename, 'w') as out_file:
            out_file.write(doc)
Generate documentation for tri.declarative APIs :param directory: directory to write the .rst files into :param classes: list of classes to generate documentation for :param missing_objects: tuple of objects to count as missing markers, if applicable