import numpy


def generate_phase_2(phase_1, dim=40):
    """
    The second step in creating datapoints in the Poirazi & Mel model.

    This takes a phase 1 vector, and creates a phase 2 vector where each
    point is the product of four elements of the phase 1 vector, randomly
    drawn with replacement.
    """
    phase_2 = []
    for _ in range(dim):
        indices = [numpy.random.randint(0, dim) for _ in range(4)]
        phase_2.append(numpy.prod([phase_1[i] for i in indices]))
    return phase_2
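# A hedged usage sketch for generate_phase_2. The phase-1 input here is an
# assumption for illustration (the real phase-1 generator is defined
# elsewhere in the model); only the call itself comes from the code above.
import numpy

phase_1 = numpy.random.choice([-1.0, 1.0], size=40).tolist()
phase_2 = generate_phase_2(phase_1, dim=40)
assert len(phase_2) == 40  # one product of four phase-1 entries per slot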
def create_endpoints_csv_file(self, timeout=-1):
    """
    Creates an endpoints CSV file for a SAN.

    Args:
        timeout:
            Timeout in seconds. Wait for task completion by default.
            The timeout does not abort the operation in OneView, just
            stops waiting for its completion.

    Returns:
        dict: Endpoint CSV File Response.
    """
    uri = "{}/endpoints/".format(self.data["uri"])
    return self._helper.do_post(uri, {}, timeout, None)
def add_pic(self, id_, name, desc, rId, x, y, cx, cy):
    """
    Append a ``<p:pic>`` shape to the group/shapetree having properties
    as specified in call.
    """
    pic = CT_Picture.new_pic(id_, name, desc, rId, x, y, cx, cy)
    self.insert_element_before(pic, 'p:extLst')
    return pic
def getModelIDFromParamsHash(self, paramsHash):
    """
    Return the modelID of the model with the given paramsHash, or
    None if not found.

    Parameters:
    ---------------------------------------------------------------------
    paramsHash: paramsHash to look for
    retval:     modelId, or None if not found
    """
    entryIdx = self._paramsHashToIndexes.get(paramsHash, None)
    if entryIdx is not None:
        return self._allResults[entryIdx]['modelID']
    else:
        return None
def add_ip_address(self, ip_address, sync=True):
    """
    Add an IP address to this OS instance.

    :param ip_address: the IP address to add to this OS instance
    :param sync: if sync=True (default), synchronize with the Ariane
        server. If sync=False, add the IP address object to the list to
        be added on the next save().
    :return:
    """
    LOGGER.debug("OSInstance.add_ip_address")
    if not sync:
        self.ip_address_2_add.append(ip_address)
    else:
        if ip_address.id is None:
            ip_address.save()
        if self.id is not None and ip_address.id is not None:
            params = {
                'id': self.id,
                'ipAddressID': ip_address.id
            }
            args = {'http_operation': 'GET',
                    'operation_path': 'update/ipAddresses/add',
                    'parameters': params}
            response = OSInstanceService.requester.call(args)
            if response.rc != 0:
                LOGGER.warning(
                    'OSInstance.add_ip_address - Problem while updating OS instance ' +
                    self.name + '. Reason: ' +
                    str(response.response_content) + '-' +
                    str(response.error_message) +
                    " (" + str(response.rc) + ")"
                )
            else:
                self.ip_address_ids.append(ip_address.id)
                ip_address.ipa_os_instance_id = self.id
        else:
            LOGGER.warning(
                'OSInstance.add_ip_address - Problem while updating OS instance ' +
                self.name + '. Reason: IP Address ' + ip_address.ipAddress +
                ' id is None'
            )
def _maximization_step(X, posteriors):
    """
    Update class parameters as below:

    priors:      P(w_i) = sum_x P(w_i | x)  ==> then normalize to get into [0, 1]
    class means: center_w_i = sum_x P(w_i|x)*x / sum_i sum_x P(w_i|x)
    """
    # Prior probabilities or class weights
    sum_post_proba = np.sum(posteriors, axis=0)
    prior_proba = sum_post_proba / (sum_post_proba.sum() + Epsilon)

    # Means
    means = np.dot(posteriors.T, X) / (sum_post_proba[:, np.newaxis] + Epsilon)

    # Covariance matrices
    n_components = posteriors.shape[1]
    n_features = X.shape[1]
    covars = np.empty(shape=(n_components, n_features, n_features), dtype=float)
    for i in range(n_components):
        post_i = posteriors[:, i]
        mean_i = means[i]
        diff_i = X - mean_i
        with np.errstate(under='ignore'):
            covar_i = np.dot(post_i * diff_i.T, diff_i) / (post_i.sum() + Epsilon)
        covars[i] = covar_i + Lambda * np.eye(n_features)

    _validate_params(prior_proba, means, covars)
    return (prior_proba, means, covars)
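# A minimal sketch of driving this M-step, assuming the module-level Epsilon
# and Lambda regularizers and the _validate_params helper referenced above
# exist in the enclosing module. The posteriors stand in for an E-step output.
import numpy as np

X = np.random.randn(100, 3)          # 100 samples, 3 features
posteriors = np.random.rand(100, 2)  # stand-in for E-step responsibilities
posteriors /= posteriors.sum(axis=1, keepdims=True)  # rows sum to 1

priors, means, covars = _maximization_step(X, posteriors)
# priors.shape == (2,), means.shape == (2, 3), covars.shape == (2, 3, 3)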
def set_stylesheet(self, subreddit, stylesheet):
    """Set stylesheet for the given subreddit.

    :returns: The json response from the server.
    """
    subreddit = six.text_type(subreddit)
    data = {'r': subreddit,
            'stylesheet_contents': stylesheet,
            'op': 'save'}  # Options: save / preview
    self.evict(self.config['stylesheet'].format(subreddit=subreddit))
    return self.request_json(self.config['subreddit_css'], data=data)
def keys(self):
    """
    Access the keys

    :returns: twilio.rest.preview.deployed_devices.fleet.key.KeyList
    :rtype: twilio.rest.preview.deployed_devices.fleet.key.KeyList
    """
    if self._keys is None:
        self._keys = KeyList(self._version, fleet_sid=self._solution['sid'], )
    return self._keys
def validate_basic_smoother_resid():
    """Compare residuals."""
    x, y = sort_data(*smoother_friedman82.build_sample_smoother_problem_friedman82())
    plt.figure()
    for span in smoother.DEFAULT_SPANS:
        my_smoother = smoother.perform_smooth(x, y, span)
        _friedman_smooth, resids = run_friedman_smooth(x, y, span)  # pylint: disable=unused-variable
        plt.plot(x, my_smoother.cross_validated_residual, '.-',
                 label='pyace span = {0}'.format(span))
        plt.plot(x, resids, '.-', label='Friedman span = {0}'.format(span))
    finish_plot()
def read_field_report(path, data_flag="*DATA", meta_data_flag="*METADATA"):
    """
    Reads a field output report.
    """
    text = open(path).read()
    mdpos = text.find(meta_data_flag)
    dpos = text.find(data_flag)
    mdata = io.StringIO("\n".join(text[mdpos:dpos].split("\n")[1:]))
    data = io.StringIO("\n".join(text[dpos:].split("\n")[1:]))
    data = pd.read_csv(data, index_col=0)
    data = data.groupby(data.index).mean()
    mdata = pd.read_csv(mdata, sep="=", header=None, index_col=0)[1]
    mdata = mdata.to_dict()
    out = {}
    out["step_num"] = int(mdata["step_num"])
    out["step_label"] = mdata["step_label"]
    out["frame"] = int(mdata["frame"])
    out["frame_value"] = float(mdata["frame_value"])
    out["part"] = mdata["instance"]
    position_map = {"NODAL": "node",
                    "ELEMENT_CENTROID": "element",
                    "WHOLE_ELEMENT": "element"}
    out["position"] = position_map[mdata["position"]]
    out["label"] = mdata["label"]
    out["data"] = data
    field_class = getattr(argiope.mesh, mdata["argiope_class"])
    return field_class(**out)
def matrices_compliance(dsm, complete_mediation_matrix):
    """
    Check if matrix and its mediation matrix are compliant.

    Args:
        dsm (:class:`DesignStructureMatrix`): the DSM to check.
        complete_mediation_matrix (list of list of int): 2-dim array

    Returns:
        tuple: (bool, str) - True if compliant, else False, plus a message
        listing any untolerated dependencies.
    """
    matrix = dsm.data
    rows_dep_matrix = len(matrix)
    cols_dep_matrix = len(matrix[0])
    rows_med_matrix = len(complete_mediation_matrix)
    cols_med_matrix = len(complete_mediation_matrix[0])

    if (rows_dep_matrix != rows_med_matrix or
            cols_dep_matrix != cols_med_matrix):
        raise DesignStructureMatrixError(
            'Matrices are NOT compliant '
            '(number of rows/columns not equal)')

    discrepancy_found = False
    message = []
    for i in range(0, rows_dep_matrix):
        for j in range(0, cols_dep_matrix):
            if ((complete_mediation_matrix[i][j] == 0 and
                    matrix[i][j] > 0) or
                    (complete_mediation_matrix[i][j] == 1 and
                     matrix[i][j] < 1)):
                discrepancy_found = True
                message.append(
                    'Untolerated dependency at %s:%s (%s:%s): '
                    '%s instead of %s' % (
                        i, j, dsm.entities[i], dsm.entities[j],
                        matrix[i][j], complete_mediation_matrix[i][j]))
    message = '\n'.join(message)

    return not discrepancy_found, message
def from_mult_iters(cls, name=None, idx=None, **kwargs):
    """Load values from multiple iters

    Parameters
    ----------
    name : string, default None
        Name of the data set. If None (default), the name will be set to
        ``'table'``.
    idx : string, default None
        Iterable to use for the data index
    **kwargs : dict of iterables
        The ``values`` field will contain dictionaries with keys for
        each of the iterables provided. For example,

            d = Data.from_mult_iters(idx='x', x=[0, 1, 5], y=(10, 20, 30))

        would result in ``d`` having a ``values`` field with

            [{'idx': 0, 'col': 'y', 'val': 10},
             {'idx': 1, 'col': 'y', 'val': 20}]

    If the iterables are not the same length, then ValueError is raised.
    """
    if not name:
        name = 'table'
    lengths = [len(v) for v in kwargs.values()]
    if len(set(lengths)) != 1:
        raise ValueError('Iterables must all be same length')
    if not idx:
        raise ValueError('Must provide iter name index reference')

    index = kwargs.pop(idx)
    vega_vals = []
    for k, v in sorted(kwargs.items()):
        for idx_val, val in zip(index, v):
            value = {}
            value['idx'] = idx_val
            value['col'] = k
            value['val'] = val
            vega_vals.append(value)

    return cls(name, values=vega_vals)
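# A usage sketch, assuming the enclosing class is the `Data` class the
# docstring references:
d = Data.from_mult_iters(name='table', idx='x',
                         x=[0, 1, 5], y=(10, 20, 30))
# d.values -> [{'idx': 0, 'col': 'y', 'val': 10},
#              {'idx': 1, 'col': 'y', 'val': 20},
#              {'idx': 5, 'col': 'y', 'val': 30}]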
def asDateTime(self):
    """Create :py:class:`datetime.datetime` object from a |ASN.1| object.

    Returns
    -------
    :
        new instance of :py:class:`datetime.datetime` object
    """
    text = str(self)
    if text.endswith('Z'):
        tzinfo = TimeMixIn.UTC
        text = text[:-1]
    elif '-' in text or '+' in text:
        if '+' in text:
            text, plusminus, tz = string.partition(text, '+')
        else:
            text, plusminus, tz = string.partition(text, '-')

        if self._shortTZ and len(tz) == 2:
            tz += '00'

        if len(tz) != 4:
            raise error.PyAsn1Error('malformed time zone offset %s' % tz)

        try:
            minutes = int(tz[:2]) * 60 + int(tz[2:])
            if plusminus == '-':
                minutes *= -1
        except ValueError:
            raise error.PyAsn1Error('unknown time specification %s' % self)

        tzinfo = TimeMixIn.FixedOffset(minutes, '?')
    else:
        tzinfo = None

    if '.' in text or ',' in text:
        if '.' in text:
            text, _, ms = string.partition(text, '.')
        else:
            text, _, ms = string.partition(text, ',')

        try:
            ms = int(ms) * 1000
        except ValueError:
            raise error.PyAsn1Error('bad sub-second time specification %s' % self)
    else:
        ms = 0

    if self._optionalMinutes and len(text) - self._yearsDigits == 6:
        text += '0000'
    elif len(text) - self._yearsDigits == 8:
        text += '00'

    try:
        dt = dateandtime.strptime(
            text, self._yearsDigits == 4 and '%Y%m%d%H%M%S' or '%y%m%d%H%M%S')
    except ValueError:
        raise error.PyAsn1Error('malformed datetime format %s' % self)

    return dt.replace(microsecond=ms, tzinfo=tzinfo)
def extract_schemas_from_file(source_path):
    """Extract schemas from 'source_path'.

    :returns: a list of ViewSchema objects on success, None if no schemas
        could be extracted.
    """
    logging.info("Extracting schemas from %s", source_path)
    try:
        with open(source_path, 'r') as source_file:
            source = source_file.read()
    except (FileNotFoundError, PermissionError) as e:
        logging.error("Cannot extract schemas: %s", e.strerror)
    else:
        try:
            schemas = extract_schemas_from_source(source, source_path)
        except SyntaxError as e:
            logging.error("Cannot extract schemas: %s", str(e))
        else:
            logging.info(
                "Extracted %d %s", len(schemas),
                "schema" if len(schemas) == 1 else "schemas")
            return schemas
def local_bind_hosts(self):
    """
    Return a list containing the IP addresses listening for the tunnels
    """
    self._check_is_started()
    return [_server.local_host for _server in self._server_list
            if _server.local_host is not None]
def fill_from_simbad(self, ident, debug=False):
    """Fill in astrometric information using the Simbad web service.

    This uses the CDS Simbad web service to look up astrometric
    information for the source name *ident* and fills in attributes
    appropriately. Values from Simbad are not always reliable.

    Returns *self*.
    """
    info = get_simbad_astrometry_info(ident, debug=debug)
    posref = 'unknown'

    for k, v in six.iteritems(info):
        if '~' in v:
            continue  # no info

        if k == 'COO(d;A)':
            self.ra = float(v) * D2R
        elif k == 'COO(d;D)':
            self.dec = float(v) * D2R
        elif k == 'COO(E)':
            a = v.split()
            self.pos_u_maj = float(a[0]) * A2R * 1e-3  # mas -> rad
            self.pos_u_min = float(a[1]) * A2R * 1e-3
            self.pos_u_pa = float(a[2]) * D2R
        elif k == 'COO(B)':
            posref = v
        elif k == 'PM(A)':
            self.promo_ra = float(v)  # mas/yr
        elif k == 'PM(D)':
            self.promo_dec = float(v)  # mas/yr
        elif k == 'PM(E)':
            a = v.split()
            self.promo_u_maj = float(a[0])  # mas/yr
            self.promo_u_min = float(a[1])
            self.promo_u_pa = float(a[2]) * D2R  # rad!
        elif k == 'PLX(V)':
            self.parallax = float(v)  # mas
        elif k == 'PLX(E)':
            self.u_parallax = float(v)  # mas
        elif k == 'RV(V)':
            self.vradial = float(v)  # km/s
        elif k == 'RV(E)':
            self.u_vradial = float(v)  # km/s

    if self.ra is None:
        raise Exception('no position returned by Simbad for "%s"' % ident)
    if self.u_parallax == 0:
        self.u_parallax = None
    if self.u_vradial == 0:
        self.u_vradial = None

    # Get the right epoch of position for 2MASS positions
    if posref == '2003yCat.2246....0C':
        self.pos_epoch = get_2mass_epoch(self.ra, self.dec, debug)

    return self
def __calculate_order(self, node_dict):
    """
    Determine a valid ordering of the nodes in which a node is not called
    before all of its dependencies. Raise an error if there is a cycle,
    or nodes are missing.
    """
    if len(node_dict.keys()) != len(set(node_dict.keys())):
        raise DependencyTreeException("Duplicate Keys Exist in node dictionary!")
    valid_order = [node for node, dependencies in node_dict.items()
                   if len(dependencies) == 0]
    remaining_nodes = [node for node in node_dict.keys()
                       if node not in valid_order]
    while len(remaining_nodes) > 0:
        node_added = False
        # Iterate over a copy so that removals do not skip elements.
        for node in list(remaining_nodes):
            dependencies = [d for d in node_dict[node] if d not in valid_order]
            if len(dependencies) == 0:
                valid_order.append(node)
                remaining_nodes.remove(node)
                node_added = True
        if not node_added:
            # The tree must be invalid, as it was not possible to remove a node.
            # It's hard to find all the errors, so just report the first one found.
            invalid_node = remaining_nodes[0]
            invalid_dependency = ', '.join(node_dict[invalid_node])
            if invalid_dependency not in remaining_nodes:
                raise DependencyTreeException(
                    "Missing dependency! One or more of ({dependency}) are missing for {dependant}.".format(
                        dependant=invalid_node, dependency=invalid_dependency))
            else:
                raise DependencyTreeException(
                    "The dependency %s is cyclic or dependent on a cyclic dependency"
                    % invalid_dependency)
    return valid_order
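# Since the method is private, the expected input shape is worth pinning
# down; a hypothetical dependency dictionary (node -> list of prerequisite
# nodes) and the order the algorithm above would produce:
node_dict = {
    'config': [],               # no dependencies: placed first
    'db':     ['config'],
    'server': ['config', 'db'],
}
# valid order: ['config', 'db', 'server'] -- each pass moves every node
# whose remaining dependencies are already in the ordering.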
def dict_sort(d, k):
    """
    Sort a list of dictionaries by key

    :param d: list of dictionaries
    :param k: key
    :return: sorted list of dictionaries
    """
    return sorted(d.copy(), key=lambda i: i[k])
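# A quick usage example:
people = [{'name': 'Bob', 'age': 35}, {'name': 'Alice', 'age': 30}]
dict_sort(people, 'name')
# -> [{'name': 'Alice', 'age': 30}, {'name': 'Bob', 'age': 35}]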
def remove_backup(name):
    '''
    Remove an IIS Configuration backup from the System.

    .. versionadded:: 2017.7.0

    Args:
        name (str): The name of the backup to remove

    Returns:
        bool: True if successful, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' win_iis.remove_backup backup_20170209
    '''
    if name not in list_backups():
        log.debug('Backup already removed: %s', name)
        return True

    ps_cmd = ['Remove-WebConfigurationBackup',
              '-Name', "'{0}'".format(name)]

    cmd_ret = _srvmgr(ps_cmd)

    if cmd_ret['retcode'] != 0:
        msg = 'Unable to remove web configuration: {0}\nError: {1}' \
              ''.format(name, cmd_ret['stderr'])
        raise CommandExecutionError(msg)

    return name not in list_backups()
def to_segwizard(segs, target, header=True, coltype=LIGOTimeGPS):
    """Write the given `SegmentList` to a file in SegWizard format.

    Parameters
    ----------
    segs : :class:`~gwpy.segments.SegmentList`
        The list of segments to write.

    target : `file`, `str`
        An open file, or file path, to which to write.

    header : `bool`, optional
        Print a column header into the file, default: `True`.

    coltype : `type`, optional
        The numerical type in which to cast times before printing.

    Notes
    -----
    This method is adapted from original code written by Kipp Cannon and
    distributed under GPLv3.
    """
    # write file path
    if isinstance(target, string_types):
        with open(target, 'w') as fobj:
            return to_segwizard(segs, fobj, header=header, coltype=coltype)

    # write file object
    if header:
        print('# seg\tstart\tstop\tduration', file=target)
    for i, seg in enumerate(segs):
        a = coltype(seg[0])
        b = coltype(seg[1])
        c = float(b - a)
        print(
            '\t'.join(map(str, (i, a, b, c))),
            file=target,
        )
def map(func, items, pool_size=10):
    """a parallelized work-alike to the built-in ``map`` function

    this function works by creating an :class:`OrderedPool` and placing
    all the arguments in :meth:`put<OrderedPool.put>` calls, then yielding
    items produced by the pool's :meth:`get<OrderedPool.get>` method.

    :param func: the mapper function to use
    :type func: function
    :param items: the items to use as the mapper's arguments
    :type items: iterable
    :param pool_size: the number of workers for the pool -- this amounts
        to the concurrency with which the map is accomplished (default 10)
    :type pool_size: int

    :returns:
        a lazy iterator (like python3's map or python2's itertools.imap)
        over the results of the mapping
    """
    with OrderedPool(func, pool_size) as pool:
        count = -1  # guard against `items` being empty
        for count, item in enumerate(items):
            pool.put(item)

        for i in xrange(count + 1):
            yield pool.get()
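# A usage sketch, assuming the OrderedPool from this module is available;
# results are computed by 10 workers but still arrive in input order:
def square(n):
    return n * n

for result in map(square, range(5)):
    print(result)  # 0, 1, 4, 9, 16 -- lazily, one at a time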
def cmd(send, msg, args):
    """Handles quotes.

    Syntax: {command} <number|nick>,
        !quote --add <quote> --nick <nick> (--approve),
        !quote --list,
        !quote --delete <number>,
        !quote --edit <number> <quote> --nick <nick>,
        !quote --search (--offset <num>) <number>
    """
    session = args['db']
    parser = arguments.ArgParser(args['config'])
    parser.add_argument('--approve', action='store_true')
    parser.add_argument('--nick', nargs='?')
    parser.add_argument('--offset', nargs='?', type=int, default=0)
    parser.add_argument('quote', nargs='*')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--list', action='store_true')
    group.add_argument('--add', action='store_true')
    group.add_argument('--delete', '--remove', type=int)
    group.add_argument('--edit', type=int)
    group.add_argument('--search', nargs='*')

    if not msg:
        send(do_get_quote(session))
        return

    try:
        cmdargs = parser.parse_args(msg)
    except arguments.ArgumentException as e:
        send(str(e))
        return

    if cmdargs.add:
        if args['type'] == 'privmsg':
            send("You want everybody to know about your witty sayings, right?")
        else:
            if cmdargs.nick is None:
                send('You must specify a nick.')
            elif not cmdargs.quote:
                send('You must specify a quote.')
            else:
                isadmin = args['is_admin'](args['nick']) or not args['config']['feature']['quoteapprove']
                approved = cmdargs.approve or not args['config']['feature']['quoteapprove']
                do_add_quote(cmdargs.nick, " ".join(cmdargs.quote), session, isadmin, approved, send, args)
    elif cmdargs.list:
        send(do_list_quotes(session, args['config']['core']['url']))
    elif cmdargs.delete:
        send(do_delete_quote(args, session, cmdargs.delete))
    elif cmdargs.edit:
        if args['is_admin'](args['nick']):
            send(do_update_quote(session, cmdargs.edit, cmdargs.nick, cmdargs.quote))
        else:
            send("You aren't allowed to edit quotes. Please ask a bot admin to do it")
    elif cmdargs.search:
        if cmdargs.approve or cmdargs.nick:
            send("Invalid option for --search")
        else:
            send(search_quote(session, cmdargs.offset, cmdargs.search))
    else:
        if msg.isdigit():
            send(do_get_quote(session, int(msg)))
        else:
            if not re.match(args['config']['core']['nickregex'], msg):
                send('Invalid nick %s.' % msg)
            else:
                send(get_quotes_nick(session, msg))
def _convert_vpathlist(input_obj):
    """convert from 'list' or 'tuple' object to pgmagick.VPathList.

    :type input_obj: list or tuple
    """
    vpl = pgmagick.VPathList()
    for obj in input_obj:
        # FIXME
        obj = pgmagick.PathMovetoAbs(pgmagick.Coordinate(obj[0], obj[1]))
        vpl.append(obj)
    return vpl
def _ReloadArtifacts(self):
    """Load artifacts from all sources."""
    self._artifacts = {}
    self._LoadArtifactsFromFiles(self._sources.GetAllFiles())
    self.ReloadDatastoreArtifacts()
def aggregate(self, key, aggregate, start=None, end=None, namespace=None,
              percentile=None):
    """Get an aggregate of all gauge data stored in the specified date range"""
    return self.make_context(key=key, aggregate=aggregate, start=start,
                             end=end, namespace=namespace,
                             percentile=percentile).aggregate()
def reprovision(vm, image, key='uuid'):
    '''
    Reprovision a vm

    vm : string
        vm to be reprovisioned
    image : string
        uuid of new image
    key : string [uuid|alias|hostname]
        value type of 'vm' parameter

    CLI Example:

    .. code-block:: bash

        salt '*' vmadm.reprovision 186da9ab-7392-4f55-91a5-b8f1fe770543 c02a2044-c1bd-11e4-bd8c-dfc1db8b0182
        salt '*' vmadm.reprovision nacl c02a2044-c1bd-11e4-bd8c-dfc1db8b0182 key=alias
    '''
    ret = {}
    if key not in ['uuid', 'alias', 'hostname']:
        ret['Error'] = 'Key must be either uuid, alias or hostname'
        return ret
    vm = lookup('{0}={1}'.format(key, vm), one=True)
    if 'Error' in vm:
        return vm
    if image not in __salt__['imgadm.list']():
        ret['Error'] = 'Image ({0}) is not present on this host'.format(image)
        return ret

    # vmadm reprovision <uuid> [-f <filename>]
    cmd = six.text_type('echo {image} | vmadm reprovision {uuid}').format(
        uuid=salt.utils.stringutils.to_unicode(vm),
        image=_quote_args(salt.utils.json.dumps({'image_uuid': image}))
    )
    res = __salt__['cmd.run_all'](cmd, python_shell=True)
    retcode = res['retcode']
    if retcode != 0:
        ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode)
        return ret
    return True
def refresh_access_token(self, refresh_token):
    '''
    Refreshes the current access token.

    Gets a new access token, updates client auth and returns it.

    Args:
        refresh_token (str): Refresh token to use

    Returns:
        The new access token
    '''
    request = self._get_request()
    response = request.post(self.OAUTH_TOKEN_URL, {
        "grant_type": "refresh_token",
        "refresh_token": refresh_token
    })
    self.auth = HSAccessTokenAuth.from_response(response)
    return self.auth.access_token
def ms_bot_framework(self) -> list:
    """Returns list of MS Bot Framework compatible states of the
    RichMessage instance nested controls.

    Returns:
        ms_bf_controls: MS Bot Framework representation of RichMessage
            instance nested controls.
    """
    ms_bf_controls = [control.ms_bot_framework() for control in self.controls]
    return ms_bf_controls
def _get_file_handler(package_data_spec, data_files_spec):
    """Get a package_data and data_files handler command.
    """
    class FileHandler(BaseCommand):

        def run(self):
            package_data = self.distribution.package_data
            package_spec = package_data_spec or dict()

            for (key, patterns) in package_spec.items():
                package_data[key] = _get_package_data(key, patterns)

            self.distribution.data_files = _get_data_files(
                data_files_spec, self.distribution.data_files
            )

    return FileHandler
def write_collided_alias(collided_alias_dict):
    """Write the collided aliases string into the collided alias file."""
    # w+ creates the alias config file if it does not exist
    open_mode = 'r+' if os.path.exists(GLOBAL_COLLIDED_ALIAS_PATH) else 'w+'
    with open(GLOBAL_COLLIDED_ALIAS_PATH, open_mode) as collided_alias_file:
        collided_alias_file.truncate()
        collided_alias_file.write(json.dumps(collided_alias_dict))
def setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol="items",
              predictionCol="prediction", numPartitions=None):
    """
    setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol="items", \
              predictionCol="prediction", numPartitions=None)
    """
    kwargs = self._input_kwargs
    return self._set(**kwargs)
def decompress(data, format=FORMAT_AUTO, memlimit=None, filters=None):
    """Decompress a block of data.

    Refer to LZMADecompressor's docstring for a description of the
    optional arguments *format*, *memlimit* and *filters*.

    For incremental decompression, use a LZMADecompressor object instead.
    """
    results = []
    while True:
        decomp = LZMADecompressor(format, memlimit, filters)
        try:
            res = decomp.decompress(data)
        except LZMAError:
            if results:
                break  # Leftover data is not a valid LZMA/XZ stream; ignore it.
            else:
                raise  # Error on the first iteration; bail out.
        results.append(res)
        if not decomp.eof:
            raise LZMAError("Compressed data ended before the "
                            "end-of-stream marker was reached")
        data = decomp.unused_data
        if not data:
            break
    return b"".join(results)
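# This mirrors the standard library's lzma.decompress; a round-trip sketch
# using the stdlib compressor:
import lzma

payload = lzma.compress(b"hello world")
assert decompress(payload) == b"hello world"

# Concatenated streams are handled by looping until no unused data remains.
double = payload + lzma.compress(b"again")
assert decompress(double) == b"hello worldagain"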
def store(self, moments):
    """ Store a moments object, merging with stored blocks when necessary """
    if len(self.storage) == self.nsave:  # merge if we must
        self.storage[-1].combine(moments, mean_free=self.remove_mean)
    else:  # append otherwise
        self.storage.append(moments)
    # merge if possible
    while self._can_merge_tail():
        M = self.storage.pop()
        self.storage[-1].combine(M, mean_free=self.remove_mean)
def get_new_project_name(self, project_name):
    """
    Return a unique project name for the copy.

    :param project_name: str: name of project we will copy
    :return: str
    """
    timestamp_str = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M')
    return "{} {}".format(project_name, timestamp_str)
def addAttachment(self, oid, file_path):
    """ Adds an attachment to a feature service

    Input:
        oid - string - OBJECTID value to add attachment to
        file_path - string - path to file
    Output:
        JSON response
    """
    if self.hasAttachments:
        attachURL = self._url + "/%s/addAttachment" % oid
        params = {'f': 'json'}
        parsed = urlparse.urlparse(attachURL)
        files = {'attachment': file_path}
        res = self._post(url=attachURL,
                         param_dict=params,
                         files=files,
                         securityHandler=self._securityHandler,
                         proxy_port=self._proxy_port,
                         proxy_url=self._proxy_url)
        return self._unicode_convert(res)
    else:
        return "Attachments are not supported for this feature service."
def get_cgroup_item(self, key):
    """
    Returns the value for a given cgroup entry.
    A list is returned when multiple values are set.
    """
    value = _lxc.Container.get_cgroup_item(self, key)

    if value is False:
        return False
    else:
        return value.rstrip("\n")
def get_resource_agent_assignment_session_for_bin(self, bin_id):
    """Gets a resource agent assignment session for the given bin.

    arg:    bin_id (osid.id.Id): the ``Id`` of the bin
    return: (osid.resource.ResourceAgentAssignmentSession) - a
            ``ResourceAgentAssignmentSession``
    raise:  NotFound - ``bin_id`` not found
    raise:  NullArgument - ``bin_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_resource_agent_assignment()``
            or ``supports_visible_federation()`` is ``false``

    *compliance: optional -- This method must be implemented if
    ``supports_resource_agent_assignment()`` and
    ``supports_visible_federation()`` are ``true``.*
    """
    if not self.supports_resource_agent_assignment():
        raise errors.Unimplemented()
    ##
    # Also include check to see if the catalog Id is found otherwise raise errors.NotFound
    ##
    # pylint: disable=no-member
    return sessions.ResourceAgentAssignmentSession(bin_id, runtime=self._runtime)
def _ensure_coroutine_function(func):
    """Return a coroutine function.

    func: either a coroutine function or a regular function

    Note a coroutine function is not a coroutine!
    """
    if asyncio.iscoroutinefunction(func):
        return func
    else:
        @asyncio.coroutine
        def coroutine_function(evt):
            func(evt)
            yield
        return coroutine_function
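# A hedged usage sketch: a plain callback and a coroutine function both end
# up awaitable the same way. Note the wrapper relies on the generator-based
# @asyncio.coroutine style (removed in Python 3.11), so this targets the
# older asyncio generation the function above itself uses.
import asyncio

def plain_handler(evt):
    print("got", evt)

handler = _ensure_coroutine_function(plain_handler)
asyncio.get_event_loop().run_until_complete(handler("some-event"))
# prints: got some-event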
def run(self, server=None, host=None, port=None, enable_pretty_logging=True):
    """
    Run WeRoBot.

    :param server: parameter passed through to the Bottle framework's
        run method; see the `bottle documentation
        <https://bottlepy.org/docs/dev/deployment.html#switching-the-server-backend>`_
    :param host: host address to bind to when running
    :param port: port to bind to when running
    :param enable_pretty_logging: whether to enable prettified log output
    """
    if enable_pretty_logging:
        from werobot.logger import enable_pretty_logging
        enable_pretty_logging(self.logger)
    if server is None:
        server = self.config["SERVER"]
    if host is None:
        host = self.config["HOST"]
    if port is None:
        port = self.config["PORT"]

    try:
        self.wsgi.run(server=server, host=host, port=port)
    except KeyboardInterrupt:
        exit(0)
def QA_fetch_get_stock_block(ip=None, port=None):
    '''Fetch stock sector/block data'''
    ip, port = get_mainmarket_ip(ip, port)
    api = TdxHq_API()
    with api.connect(ip, port):
        data = pd.concat([
            api.to_df(api.get_and_parse_block_info("block_gn.dat")).assign(type='gn'),
            api.to_df(api.get_and_parse_block_info("block.dat")).assign(type='yb'),
            api.to_df(api.get_and_parse_block_info("block_zs.dat")).assign(type='zs'),
            api.to_df(api.get_and_parse_block_info("block_fg.dat")).assign(type='fg')
        ])

        if len(data) > 10:
            return (data.assign(source='tdx')
                        .drop(['block_type', 'code_index'], axis=1)
                        .set_index('code', drop=False, inplace=False)
                        .drop_duplicates())
        else:
            QA_util_log_info('Failed to fetch block data')
def MaskSolveSlow(A, b, w=5, progress=True, niter=None):
    '''
    Identical to `MaskSolve`, but computes the solution the
    brute-force way.
    '''
    # Number of data points
    N = b.shape[0]

    # How many iterations? Default is to go through the entire dataset.
    if niter is None:
        niter = N - w + 1

    # Our result matrix
    X = np.empty((niter, N - w))

    # Iterate! The mask at step `n` goes from data index `n`
    # to data index `n + w - 1` (inclusive).
    for n in prange(niter):
        mask = np.arange(n, n + w)
        An = np.delete(np.delete(A, mask, axis=0), mask, axis=1)
        Un = cholesky(An)
        bn = np.delete(b, mask)
        X[n] = cho_solve((Un, False), bn)

    return X
def snip(tag="", start=-2, write_date=True):
    """
    This function records a previously executed notebook cell into a file
    (default: ipython_history.py).

    A tag can be added to sort the cell. `start` defines which cell in the
    history to record. Default is -2, i.e. the one executed previously to
    the current one.
    """
    import IPython
    i = IPython.get_ipython()
    last_history = i.history_manager.get_range(start=start, stop=start + 1,
                                               output=True)
    with open("ipython_history.py", 'a') as output_file:
        for l in last_history:
            global _session_description
            output_file.write('\n\n\n' + ('#' * 80) + '\n')
            if _session_description != "":
                output_file.write('#\n' + _lines_as_comments(_session_description) + '\n#\n')
            if tag != "":
                output_file.write(_lines_as_comments(tag) + '\n')
            if write_date:
                import datetime
                output_file.write('# ' + datetime.datetime.now().isoformat() + '\n')
            output_file.write('\n\n# In [' + str(l[1]) + ']:\n' + l[2][0])
            _last_inputs.append(l[2][0])
            _tagged_inputs[tag] = _tagged_inputs.get(tag, [])
            _tagged_inputs[tag].append(l[2][0])
            output_file.write('\n\n# Out [' + str(l[1]) + ']:\n' + _lines_as_comments(repr(l[2][1])))
def remove_class(cls, *args):
    """Remove classes from the group.

    Parameters
    ----------
    *args : `type`
        Classes to remove.
    """
    for cls2 in args:
        try:
            del cls.classes[cls2.__name__]
        except KeyError:
            pass
def asn1_generaltime_to_seconds(timestr):
    """The given string has one of the following formats:

        YYYYMMDDhhmmssZ
        YYYYMMDDhhmmss+hhmm
        YYYYMMDDhhmmss-hhmm

    @return: a datetime object or None on error
    """
    res = None
    timeformat = "%Y%m%d%H%M%S"
    try:
        res = datetime.strptime(timestr, timeformat + 'Z')
    except ValueError:
        try:
            res = datetime.strptime(timestr, timeformat + '%z')
        except ValueError:
            pass
    return res
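# A few examples of the accepted formats:
asn1_generaltime_to_seconds('20230115123045Z')
# -> datetime(2023, 1, 15, 12, 30, 45)
asn1_generaltime_to_seconds('20230115123045+0100')
# -> datetime(2023, 1, 15, 12, 30, 45, tzinfo=UTC+01:00)
asn1_generaltime_to_seconds('not-a-time')
# -> None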
def set_remote_config(experiment_config, port, config_file_name):
    '''Call setClusterMetadata to pass trial'''
    # set machine_list
    request_data = dict()
    request_data['machine_list'] = experiment_config['machineList']
    if request_data['machine_list']:
        for i in range(len(request_data['machine_list'])):
            if isinstance(request_data['machine_list'][i].get('gpuIndices'), int):
                request_data['machine_list'][i]['gpuIndices'] = \
                    str(request_data['machine_list'][i].get('gpuIndices'))

    response = rest_put(cluster_metadata_url(port), json.dumps(request_data),
                        REST_TIME_OUT)
    err_message = ''
    if not response or not check_response(response):
        if response is not None:
            err_message = response.text
            _, stderr_full_path = get_log_path(config_file_name)
            with open(stderr_full_path, 'a+') as fout:
                fout.write(json.dumps(json.loads(err_message), indent=4,
                                      sort_keys=True, separators=(',', ':')))
        return False, err_message
    result, message = setNNIManagerIp(experiment_config, port, config_file_name)
    if not result:
        return result, message
    # set trial_config
    return set_trial_config(experiment_config, port, config_file_name), err_message
def get_ranking(self, alt):
    """
    Description:
        Returns the ranking of a given alternative in the computed
        aggregate ranking. An error is thrown if the alternative does
        not exist. The ranking is the index in the aggregate ranking,
        which is 0-indexed.
    Parameters:
        alt: the key that represents an alternative
    """
    if self.alts_to_ranks is None:
        raise ValueError("Aggregate ranking must be created first")
    try:
        rank = self.alts_to_ranks[alt]
        return rank
    except KeyError:
        raise KeyError("No alternative \"{}\" found in ".format(str(alt)) +
                       "the aggregate ranking")
def SaltAndPepper(p=0, per_channel=False, name=None, deterministic=False,
                  random_state=None):
    """
    Adds salt and pepper noise to an image, i.e. some white-ish and
    black-ish pixels.

    dtype support::

        See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.

    Parameters
    ----------
    p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
        Probability of changing a pixel to salt/pepper noise.

            * If a float, then that value will be used for all images as
              the probability.
            * If a tuple ``(a, b)``, then a probability will be sampled
              per image from the range ``a <= x <= b``.
            * If a list, then a random value will be sampled from that
              list per image.
            * If a StochasticParameter, then this parameter will be used as
              the *mask*, i.e. it is expected to contain values between
              0.0 and 1.0, where 1.0 means that salt/pepper is to be added
              at that location.

    per_channel : bool or float, optional
        Whether to use the same value for all channels (False) or to
        sample a new value for each channel (True). If this value is a
        float ``p``, then for ``p`` percent of all images `per_channel`
        will be treated as True, otherwise as False.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.SaltAndPepper(0.05)

    Replaces 5 percent of all pixels with salt/pepper.
    """
    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)

    return ReplaceElementwise(
        mask=p,
        replacement=iap.Beta(0.5, 0.5) * 255,
        per_channel=per_channel,
        name=name,
        deterministic=deterministic,
        random_state=random_state
    )
def next_page(self):
    """
    Fetches next result set.

    :return: VolumeCollection object.
    """
    for link in self.links:
        if link.next:
            return self._load(link.next)
    raise PaginationError('No more entries.')
def _is_not_archived(sysmeta_pyxb):
    """Assert that ``sysmeta_pyxb`` does not have the archived flag set."""
    if _is_archived(sysmeta_pyxb):
        raise d1_common.types.exceptions.InvalidSystemMetadata(
            0,
            'Archived flag was set. A new object created via create() or update() '
            'cannot already be archived. pid="{}"'.format(
                d1_common.xml.get_req_val(sysmeta_pyxb.identifier)
            ),
            identifier=d1_common.xml.get_req_val(sysmeta_pyxb.identifier),
        )
def write_event(self, *args):
    """
    Write an event record::

        writer.write_event(datetime.time(12, 34, 56), 'PEV')
        # -> E123456PEV

        writer.write_event(datetime.time(12, 34, 56), 'PEV', 'Some Text')
        # -> E123456PEVSome Text

        writer.write_event('PEV')  # uses utcnow()
        # -> E121503PEV

    :param time: UTC time of the fix record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param code: event type as three-letter-code
    :param text: additional text describing the event (optional)
    """
    num_args = len(args)
    if not (1 <= num_args <= 3):
        raise ValueError('Invalid number of parameters received')

    if num_args == 3:
        time, code, text = args
    elif num_args == 1:
        code = args[0]
        time = text = None
    elif isinstance(args[0], (datetime.time, datetime.datetime)):
        time, code = args
        text = None
    else:
        code, text = args
        time = None

    if time is None:
        time = datetime.datetime.utcnow()

    if not patterns.THREE_LETTER_CODE.match(code):
        raise ValueError('Invalid event code')

    record = self.format_time(time)
    record += code
    if text:
        record += text

    self.write_record('E', record)
def needs_distribute_ready(self):
    '''Determine whether or not we need to redistribute the ready state'''
    # Try to pre-empt starvation by comparing current RDY against
    # the last value sent.
    alive = [c for c in self.connections() if c.alive()]
    if any(c.ready <= (c.last_ready_sent * 0.25) for c in alive):
        return True
def tear_down(self):
    """Tear down the virtual box machine"""
    if not self.browser_config.get('terminate'):
        self.warning_log("Skipping terminate")
        return

    self.info_log("Tearing down")

    if self.browser_config.get('platform').lower() == 'linux':
        self.execute_command("shutdown -h now", username='root')
    elif self.browser_config.get('platform').lower() == 'windows':
        self.session.console.power_down()
def Sens_m_sample(poly, dist, samples, rule="R"):
    """
    First order sensitivity indices estimated using Saltelli's method.

    Args:
        poly (chaospy.Poly):
            If provided, evaluated samples through polynomials before returned.
        dist (chaospy.Dist):
            distribution to sample from.
        samples (int):
            The number of samples to draw for each matrix.
        rule (str):
            Scheme for generating random samples.

    Return:
        (numpy.ndarray):
            array with `shape == (len(dist), len(poly))` where
            `sens[dim][pol]` is the first sensitivity index for
            distribution dimensions `dim` and polynomial index `pol`.

    Examples:
        >>> dist = chaospy.Iid(chaospy.Uniform(), 2)
        >>> poly = chaospy.basis(2, 2, dim=2)
        >>> print(poly)
        [q0^2, q0q1, q1^2]
        >>> print(numpy.around(Sens_m_sample(poly, dist, 10000, rule="M"), 4))
        [[0.008  0.0026 0.    ]
         [0.     0.6464 2.1321]]
    """
    dim = len(dist)
    generator = Saltelli(dist, samples, poly, rule=rule)

    zeros = [0] * dim
    ones = [1] * dim

    variance = numpy.var(generator[zeros], -1)

    matrix_0 = generator[zeros]
    matrix_1 = generator[ones]
    mean = .5 * (numpy.mean(matrix_1) + numpy.mean(matrix_0))

    matrix_0 -= mean
    matrix_1 -= mean

    out = [
        numpy.mean(matrix_1 * ((generator[index] - mean) - matrix_0), -1) /
        numpy.where(variance, variance, 1)
        for index in numpy.eye(dim, dtype=bool)
    ]
    return numpy.array(out)
def today(boo):
    """
    Return today's date as either a String or a Number, as specified by
    the User.

    Args:
        boo: if true, function returns Number (20151230); if false,
            returns String ("2015-12-30")

    Returns:
        either a Number or a String, dependent upon the user's input
    """
    tod = datetime.strptime(
        datetime.today().date().isoformat().replace('-', ' '), '%Y %m %d')
    if boo:
        return int(str(tod).replace('-', '')[:8])
    else:
        return str(tod)[:10]
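# Example outputs for both branches:
today(True)   # -> 20151230 (an int, when run on 2015-12-30)
today(False)  # -> '2015-12-30' (a str)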
def maximum(self, node):
    """
    Find the max node when the given node is regarded as the root of a
    subtree.

    :param node:
    :return: max node
    """
    temp_node = node
    while temp_node.right is not None:
        temp_node = temp_node.right
    return temp_node
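# An illustration with a hypothetical node type (the tree class defines its
# own); maximum() just follows right links, so on this tree it returns the
# node holding 9:
class _Node(object):
    def __init__(self, key, left=None, right=None):
        self.key, self.left, self.right = key, left, right

root = _Node(5, left=_Node(3), right=_Node(8, right=_Node(9)))
# tree.maximum(root).key == 9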
def _get_values(self, data_blob, dtype_enum, shape_string):
    """Obtains values for histogram data given blob and dtype enum.

    Args:
      data_blob: The blob obtained from the database.
      dtype_enum: The enum representing the dtype.
      shape_string: A comma-separated string of numbers denoting shape.

    Returns:
      The histogram values as a list served to the frontend.
    """
    buf = np.frombuffer(data_blob, dtype=tf.DType(dtype_enum).as_numpy_dtype)
    return buf.reshape([int(i) for i in shape_string.split(',')]).tolist()
def user(self, obj, with_user_activity=False, follow_flag=None, **kwargs):
    """Create a stream of the most recent actions by objects that the
    user is following."""
    q = Q()
    qs = self.public()

    if not obj:
        return qs.none()

    check(obj)

    if with_user_activity:
        q = q | Q(
            actor_content_type=ContentType.objects.get_for_model(obj),
            actor_object_id=obj.pk
        )

    follows = apps.get_model('actstream', 'follow').objects.filter(user=obj)
    if follow_flag:
        follows = follows.filter(flag=follow_flag)

    content_types = ContentType.objects.filter(
        pk__in=follows.values('content_type_id')
    )

    if not (content_types.exists() or with_user_activity):
        return qs.none()

    for content_type in content_types:
        object_ids = follows.filter(content_type=content_type)
        q = q | Q(
            actor_content_type=content_type,
            actor_object_id__in=object_ids.values('object_id')
        ) | Q(
            target_content_type=content_type,
            target_object_id__in=object_ids.filter(
                actor_only=False).values('object_id')
        ) | Q(
            action_object_content_type=content_type,
            action_object_object_id__in=object_ids.filter(
                actor_only=False).values('object_id')
        )

    return qs.filter(q, **kwargs)
def add_parameter_dd(self, dag_tag, node_dict):
    """
    helper function for adding parameters in condition

    Parameters
    ---------------
    dag_tag: etree SubElement
        the DAG tag is contained in this subelement
    node_dict: dictionary
        the decision diagram dictionary

    Return
    ---------------
    None
    """
    if isinstance(node_dict, defaultdict) or isinstance(node_dict, dict):
        node_tag = etree.SubElement(
            dag_tag, 'Node', attrib={'var': next(iter(node_dict.keys()))})
        edge_dict = next(iter(node_dict.values()))
        for edge in sorted(edge_dict.keys(), key=tuple):
            edge_tag = etree.SubElement(node_tag, 'Edge', attrib={'val': edge})
            value = edge_dict.get(edge)
            if isinstance(value, six.string_types):
                terminal_tag = etree.SubElement(edge_tag, 'Terminal')
                terminal_tag.text = value
            elif 'type' in value:
                if 'val' in value:
                    etree.SubElement(edge_tag, 'SubDAG',
                                     attrib={'type': value['type'],
                                             'var': value['var'],
                                             'val': value['val']})
                elif 'idref' in value:
                    etree.SubElement(edge_tag, 'SubDAG',
                                     attrib={'type': value['type'],
                                             'idref': value['idref']})
                else:
                    etree.SubElement(edge_tag, 'SubDAG',
                                     attrib={'type': value['type'],
                                             'var': value['var']})
            else:
                self.add_parameter_dd(edge_tag, value)
def format_extension(self):
    """The format extension of asset.

    Example::

        >>> attrs = AssetAttributes(environment, 'js/models.js.coffee')
        >>> attrs.format_extension
        '.js'

        >>> attrs = AssetAttributes(environment, 'js/lib/external.min.js.coffee')
        >>> attrs.format_extension
        '.js'
    """
    for extension in reversed(self.extensions):
        compiler = self.environment.compilers.get(extension)
        if not compiler and self.environment.mimetypes.get(extension):
            return extension
def setParts(self, parts):
    """
    Sets the path for this edit widget by providing the parts to the path.

    :param parts | [<str>, ..]
    """
    self.setText(self.separator().join(map(str, parts)))
def parse_string_descriptor(string_desc):
    """Parse a string descriptor of a streamer into a DataStreamer object.

    Args:
        string_desc (str): The string descriptor that we wish to parse.

    Returns:
        DataStreamer: A DataStreamer object representing the streamer.
    """
    if not isinstance(string_desc, str):
        string_desc = str(string_desc)
    if not string_desc.endswith(';'):
        string_desc += ';'

    parsed = get_streamer_parser().parseString(string_desc)[0]

    realtime = 'realtime' in parsed
    broadcast = 'broadcast' in parsed
    encrypted = 'security' in parsed and parsed['security'] == 'encrypted'
    signed = 'security' in parsed and parsed['security'] == 'signed'
    auto = 'manual' not in parsed

    with_other = None
    if 'with_other' in parsed:
        with_other = parsed['with_other']
        auto = False

    dest = SlotIdentifier.FromString('controller')
    if 'explicit_tile' in parsed:
        dest = parsed['explicit_tile']

    selector = parsed['selector']

    # Make sure all of the combinations are valid
    if realtime and (encrypted or signed):
        raise SensorGraphSemanticError("Realtime streamers cannot be either signed or encrypted")
    if broadcast and (encrypted or signed):
        raise SensorGraphSemanticError("Broadcast streamers cannot be either signed or encrypted")

    report_type = 'broadcast' if broadcast else 'telegram'

    if realtime or broadcast:
        report_format = u'individual'
    elif signed:
        report_format = u'signedlist_userkey'
    elif encrypted:
        raise SensorGraphSemanticError("Encrypted streamers are not yet supported")
    else:
        report_format = u'hashedlist'

    return DataStreamer(selector, dest, report_format, auto,
                        report_type=report_type, with_other=with_other)
def process(self, index=None):
    """
    This will completely process a directory of elevation tiles (as
    supplied in the constructor). Both phases of the calculation, the
    single tile and edge resolution phases, are run.

    Parameters
    -----------
    index : int/slice (optional)
        Default None - processes all tiles in a directory. See
        :py:func:`process_twi` for additional options.
    """
    # Round 0 of twi processing: process the magnitude and directions
    # of slopes
    print "Starting slope calculation round"
    self.process_twi(index, do_edges=False, skip_uca_twi=True)

    # Round 1 of twi processing
    print "Starting self-area calculation round"
    self.process_twi(index, do_edges=False)

    # Round 2 of twi processing: edge resolution
    i = self.tile_edge.find_best_candidate(self.elev_source_files)

    print "Starting edge resolution round: ",
    count = 0
    i_old = -1
    same_count = 0
    while i is not None and same_count < 3:
        count += 1
        print '*' * 10
        print count, '(%d -- > %d) .' % (i_old, i)
        self.process_twi(i, do_edges=True)
        i_old = i
        i = self.tile_edge.find_best_candidate(self.elev_source_files)
        if i_old == i:
            same_count += 1
        else:
            same_count = 0

    print '*' * 79
    print '******* PROCESSING COMPLETED *******'
    print '*' * 79
    return self
def models_to_table(obj, params=True):
    r"""
    Converts a ModelsDict object to a ReST compatible table

    Parameters
    ----------
    obj : OpenPNM object
        Any object that has a ``models`` attribute

    params : boolean
        Indicates whether or not to include a list of parameter values in
        the table. Set to False for just a list of models, and True for a
        more verbose table with all parameter values.
    """
    if not hasattr(obj, 'models'):
        raise Exception('Received object does not have any models')
    row = '+' + '-' * 4 + '+' + '-' * 22 + '+' + '-' * 18 + '+' + '-' * 26 + '+'
    fmt = '{0:1s} {1:2s} {2:1s} {3:20s} {4:1s} {5:16s} {6:1s} {7:24s} {8:1s}'
    lines = []
    lines.append(row)
    lines.append(fmt.format('|', '#', '|', 'Property Name', '|', 'Parameter',
                            '|', 'Value', '|'))
    lines.append(row.replace('-', '='))
    for i, item in enumerate(obj.models.keys()):
        prop = item
        if len(prop) > 20:
            prop = item[:17] + "..."
        temp = obj.models[item].copy()
        model = str(temp.pop('model')).split(' ')[1]
        lines.append(fmt.format('|', str(i + 1), '|', prop, '|', 'model:',
                                '|', model, '|'))
        lines.append(row)
        if params:
            for param in temp.keys():
                p1 = param
                if len(p1) > 16:
                    p1 = p1[:14] + '...'
                p2 = str(temp[param])
                if len(p2) > 24:
                    p2 = p2[:21] + '...'
                lines.append(fmt.format('|', '', '|', '', '|', p1, '|', p2, '|'))
                lines.append(row)
    return '\n'.join(lines)
def van_image_enc_2d(x, first_depth, reuse=False, hparams=None): """The image encoder for the VAN. Similar architecture as Ruben's paper (http://proceedings.mlr.press/v70/villegas17a/villegas17a.pdf). Args: x: The image to encode. first_depth: The depth of the first layer. Depth is increased in subsequent layers. reuse: To reuse in variable scope or not. hparams: The python hparams. Returns: The encoded image. """ with tf.variable_scope('van_image_enc', reuse=reuse): enc_history = [x] enc = tf.layers.conv2d( x, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1) enc = tf.contrib.layers.layer_norm(enc) enc = tf.layers.conv2d( enc, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1) enc = tf.nn.max_pool(enc, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME') enc = tf.nn.dropout(enc, hparams.van_keep_prob) enc = tf.contrib.layers.layer_norm(enc) enc_history.append(enc) enc = tf.layers.conv2d( enc, first_depth * 2, 3, padding='same', activation=tf.nn.relu, strides=1) enc = tf.layers.conv2d( enc, first_depth * 2, 3, padding='same', activation=tf.nn.relu, strides=1) enc = tf.nn.max_pool(enc, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME') enc = tf.nn.dropout(enc, hparams.van_keep_prob) enc = tf.contrib.layers.layer_norm(enc) enc_history.append(enc) enc = tf.layers.conv2d( enc, first_depth * 4, 3, padding='same', activation=tf.nn.relu, strides=1) enc = tf.layers.conv2d( enc, first_depth * 4, 3, padding='same', activation=tf.nn.relu, strides=1) enc = tf.layers.conv2d( enc, first_depth * 4, 3, padding='same', activation=tf.nn.relu, strides=1) enc = tf.nn.max_pool(enc, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME') return enc, enc_history
The image encoder for the VAN. Similar architecture as Ruben's paper (http://proceedings.mlr.press/v70/villegas17a/villegas17a.pdf). Args: x: The image to encode. first_depth: The depth of the first layer. Depth is increased in subsequent layers. reuse: To reuse in variable scope or not. hparams: The python hparams. Returns: The encoded image.
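A shape sketch may help when reading the encoder above; this bookkeeping is read off the code itself, not taken from the paper, and assumes NHWC input with H and W divisible by 8:

# Shape sketch (NHWC), assuming H and W divisible by 8:
#   x:            [B, H,   W,   C]
#   after pool 1: [B, H/2, W/2, first_depth]
#   after pool 2: [B, H/4, W/4, first_depth * 2]
#   output enc:   [B, H/8, W/8, first_depth * 4]
# enc_history holds [x, pool-1 activations, pool-2 activations].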
def adjust_frame(self, pos, absolute_pos): """Adjust stack frame by pos positions. If absolute_pos then pos is an absolute number. Otherwise it is a relative number. A negative number indexes from the other end.""" if not self.curframe: Mmsg.errmsg(self, "No stack.") return # Below we remove any negativity. At the end, pos will be # the new value of self.curindex. if absolute_pos: if pos >= 0: pos = len(self.stack)-pos-1 else: pos = -pos-1 else: pos += self.curindex if pos < 0: Mmsg.errmsg(self, "Adjusting would put us beyond the oldest frame.") return elif pos >= len(self.stack): Mmsg.errmsg(self, "Adjusting would put us beyond the newest frame.") return self.curindex = pos self.curframe = self.stack[self.curindex][0] self.print_location() self.list_lineno = None return
Adjust stack frame by pos positions. If absolute_pos then pos is an absolute number. Otherwise it is a relative number. A negative number indexes from the other end.
def _lookup_online(word):
    """Look up word on diki.pl.

    Parameters
    ----------
    word : str
        Word to look up.

    Returns
    -------
    str
        Website HTML content.
    """
    URL = "https://www.diki.pl/{word}"
    HEADERS = {
        "User-Agent": (
            "Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; "
            "Trident/7.0; rv:11.0) like Gecko"
        )
    }

    logger.debug("Looking up online: %s", word)
    quoted_word = urllib.parse.quote(word)
    req = urllib.request.Request(URL.format(word=quoted_word), headers=HEADERS)
    with urllib.request.urlopen(req) as response:
        html_string = response.read().decode()
    return html.unescape(html_string)
Look up word on diki.pl.

Parameters
----------
word : str
    Word to look up.

Returns
-------
str
    Website HTML content.
def isclose(a, b, rel_tol=1e-9, abs_tol=0.0):
    '''Pure-python (and therefore slow) version of the standard library
    isclose. Works on older versions of Python though! It has no formal
    unit tests, but has been checked manually against the standard
    library implementation:

    from math import isclose as isclose2
    from random import uniform
    for i in range(10000000):
        a = uniform(-1, 1)
        b = uniform(-1, 1)
        rel_tol = uniform(0, 1)
        abs_tol = uniform(0, .001)
        ans1 = isclose(a, b, rel_tol, abs_tol)
        ans2 = isclose2(a, b, rel_tol=rel_tol, abs_tol=abs_tol)
        try:
            assert ans1 == ans2
        except:
            print(a, b, rel_tol, abs_tol)
    '''
    if rel_tol < 0.0 or abs_tol < 0.0:
        raise ValueError('Negative tolerances')
    if (a.real == b.real) and (a.imag == b.imag):
        return True
    if (isinf(a.real) or isinf(a.imag)
            or isinf(b.real) or isinf(b.imag)):
        return False
    diff = abs(a - b)
    return (diff <= rel_tol * abs(b)
            or diff <= rel_tol * abs(a)
            or diff <= abs_tol)
Pure-python (and therefore slow) version of the standard library isclose.
Works on older versions of Python though! It has no formal unit tests, but
has been checked manually against the standard library implementation:

from math import isclose as isclose2
from random import uniform
for i in range(10000000):
    a = uniform(-1, 1)
    b = uniform(-1, 1)
    rel_tol = uniform(0, 1)
    abs_tol = uniform(0, .001)
    ans1 = isclose(a, b, rel_tol, abs_tol)
    ans2 = isclose2(a, b, rel_tol=rel_tol, abs_tol=abs_tol)
    try:
        assert ans1 == ans2
    except:
        print(a, b, rel_tol, abs_tol)
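A minimal runnable sanity check of this fallback, with hand-picked values (not taken from any existing test suite); it assumes math.isinf is imported, as the function body requires:

from math import isinf  # required by isclose's infinity check

assert isclose(1.0, 1.0 + 1e-12)            # within the default rel_tol=1e-9
assert not isclose(1.0, 1.1)                # relative difference of ~10%
assert isclose(0.0, 1e-12, abs_tol=1e-9)    # abs_tol handles values near zero
assert isclose(float('inf'), float('inf'))  # equal infinities compare close
assert not isclose(float('inf'), 1.0)       # a single infinity is never close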
def create_runtime(self, env, # type: MutableMapping[Text, Text] runtime_context # type: RuntimeContext ): # type: (...) -> Tuple[List, Optional[Text]] """ Returns the Singularity runtime list of commands and options.""" any_path_okay = self.builder.get_requirement("DockerRequirement")[1] \ or False runtime = [u"singularity", u"--quiet", u"exec", u"--contain", u"--pid", u"--ipc"] if _singularity_supports_userns(): runtime.append(u"--userns") runtime.append(u"--bind") runtime.append(u"{}:{}:rw".format( docker_windows_path_adjust(os.path.realpath(self.outdir)), self.builder.outdir)) runtime.append(u"--bind") tmpdir = "/tmp" # nosec runtime.append(u"{}:{}:rw".format( docker_windows_path_adjust(os.path.realpath(self.tmpdir)), tmpdir)) self.add_volumes(self.pathmapper, runtime, any_path_okay=True, secret_store=runtime_context.secret_store, tmpdir_prefix=runtime_context.tmpdir_prefix) if self.generatemapper is not None: self.add_volumes( self.generatemapper, runtime, any_path_okay=any_path_okay, secret_store=runtime_context.secret_store, tmpdir_prefix=runtime_context.tmpdir_prefix) runtime.append(u"--pwd") runtime.append(u"%s" % (docker_windows_path_adjust(self.builder.outdir))) if runtime_context.custom_net: raise UnsupportedRequirement( "Singularity implementation does not support custom networking") elif runtime_context.disable_net: runtime.append(u"--net") env["SINGULARITYENV_TMPDIR"] = tmpdir env["SINGULARITYENV_HOME"] = self.builder.outdir for name, value in self.environment.items(): env["SINGULARITYENV_{}".format(name)] = str(value) return (runtime, None)
Returns the Singularity runtime list of commands and options.
def _iter_enum_member_values(eid, bitmask):
    """Iterate member values with given bitmask inside the enum.

    Note that `DEFMASK` can either indicate end-of-values or a valid value.
    Iterate the member serials to tell the two cases apart.
    """
    value = idaapi.get_first_enum_member(eid, bitmask)
    yield value
    while value != DEFMASK:
        value = idaapi.get_next_enum_member(eid, value, bitmask)
        yield value
Iterate member values with given bitmask inside the enum.

Note that `DEFMASK` can either indicate end-of-values or a valid value.
Iterate the member serials to tell the two cases apart.
def info(self, message, payload=None): """DEPRECATED""" if payload: self.log(event=message, payload=payload) else: self.log(event=message) return self
DEPRECATED
def rendered(self): """The rendered wire format for all conditions that have been rendered. Rendered conditions are never cleared. A new :class:`~bloop.conditions.ConditionRenderer` should be used for each operation.""" expressions = {k: v for (k, v) in self.expressions.items() if v is not None} if self.refs.attr_names: expressions["ExpressionAttributeNames"] = self.refs.attr_names if self.refs.attr_values: expressions["ExpressionAttributeValues"] = self.refs.attr_values return expressions
The rendered wire format for all conditions that have been rendered. Rendered conditions are never cleared. A new :class:`~bloop.conditions.ConditionRenderer` should be used for each operation.
def windowed_run_events(da, window, dim='time'):
    """Return the number of runs of a minimum length.

    Parameters
    ----------
    da : N-dimensional Xarray data array (boolean)
      Input data array.
    window : int
      Minimum run length.
    dim : Xarray dimension (default = 'time')
      Dimension along which to calculate consecutive runs.

    Returns
    -------
    out : N-dimensional xarray data array (int)
      Number of distinct runs of a minimum length.
    """
    d = rle(da, dim=dim)
    out = (d >= window).sum(dim=dim)
    return out
Return the number of runs of a minimum length.

Parameters
----------
da : N-dimensional Xarray data array (boolean)
  Input data array.
window : int
  Minimum run length.
dim : Xarray dimension (default = 'time')
  Dimension along which to calculate consecutive runs.

Returns
-------
out : N-dimensional xarray data array (int)
  Number of distinct runs of a minimum length.
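A small worked example, under the assumption that the module's rle helper computes run lengths along dim; the expected counts are shown as comments since rle is not defined here:

import xarray as xr

# Two runs of True, of lengths 3 and 2.
da = xr.DataArray([True, True, True, False, True, True, False], dims='time')
# windowed_run_events(da, window=2)  ->  2  (both runs reach length 2)
# windowed_run_events(da, window=3)  ->  1  (only the first run reaches 3)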
def post_unpack_merkleblock(d, f): """ A post-processing "post_unpack" to merkleblock messages. It validates the merkle proofs (throwing an exception if there's an error), and returns the list of transaction hashes in "tx_hashes". The transactions are supposed to be sent immediately after the merkleblock message. """ level_widths = [] count = d["total_transactions"] while count > 1: level_widths.append(count) count += 1 count //= 2 level_widths.append(1) level_widths.reverse() tx_acc = [] flags = d["flags"] hashes = list(reversed(d["hashes"])) left_hash, flag_index = _recurse(level_widths, 0, 0, hashes, flags, 0, tx_acc) if len(hashes) > 0: raise ValueError("extra hashes: %s" % hashes) idx, r = divmod(flag_index-1, 8) if idx != len(flags) - 1: raise ValueError("not enough flags consumed") if flags[idx] > (1 << (r+1))-1: raise ValueError("unconsumed 1 flag bits set") if left_hash != d["header"].merkle_root: raise ValueError( "merkle root %s does not match calculated hash %s" % ( b2h_rev(d["header"].merkle_root), b2h_rev(left_hash))) d["tx_hashes"] = tx_acc return d
A post-processing "post_unpack" to merkleblock messages. It validates the merkle proofs (throwing an exception if there's an error), and returns the list of transaction hashes in "tx_hashes". The transactions are supposed to be sent immediately after the merkleblock message.
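The level-width loop above halves the node count (rounding up) from the leaf level to the root. A self-contained sketch of just that bookkeeping, with a hand-checked case:

def merkle_level_widths(total_transactions):
    # Mirrors the loop above: leaf-level width first, halving (rounded up)
    # each level, then reversed so the root (width 1) comes first.
    widths = []
    count = total_transactions
    while count > 1:
        widths.append(count)
        count = (count + 1) // 2
    widths.append(1)
    widths.reverse()
    return widths

# 5 leaves -> 3 pair hashes -> 2 -> 1 (the merkle root)
assert merkle_level_widths(5) == [1, 2, 3, 5]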
def set_figure(self, figure, handle=None): """Call this with the Bokeh figure object.""" self.figure = figure self.bkimage = None self._push_handle = handle wd = figure.plot_width ht = figure.plot_height self.configure_window(wd, ht) doc = curdoc() #self.logger.info(str(dir(doc))) doc.add_periodic_callback(self.timer_cb, 50) self.logger.info("figure set")
Call this with the Bokeh figure object.
def get_feeds(self, project=None, feed_role=None, include_deleted_upstreams=None): """GetFeeds. [Preview API] Get all feeds in an account where you have the provided role access. :param str project: Project ID or project name :param str feed_role: Filter by this role, either Administrator(4), Contributor(3), or Reader(2) level permissions. :param bool include_deleted_upstreams: Include upstreams that have been deleted in the response. :rtype: [Feed] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if feed_role is not None: query_parameters['feedRole'] = self._serialize.query('feed_role', feed_role, 'str') if include_deleted_upstreams is not None: query_parameters['includeDeletedUpstreams'] = self._serialize.query('include_deleted_upstreams', include_deleted_upstreams, 'bool') response = self._send(http_method='GET', location_id='c65009a7-474a-4ad1-8b42-7d852107ef8c', version='5.1-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[Feed]', self._unwrap_collection(response))
GetFeeds. [Preview API] Get all feeds in an account where you have the provided role access. :param str project: Project ID or project name :param str feed_role: Filter by this role, either Administrator(4), Contributor(3), or Reader(2) level permissions. :param bool include_deleted_upstreams: Include upstreams that have been deleted in the response. :rtype: [Feed]
def fix_addresses(start=None, end=None):
    """Set missing addresses to the start and end of the IDB.

    Takes start and end addresses. If an address is None or `BADADDR`,
    returns the start or end address of the IDB instead.

    Args:
        start: Start EA. Use `None` to get IDB start.
        end:  End EA. Use `None` to get IDB end.

    Returns:
        (start, end)
    """
    if start in (None, idaapi.BADADDR):
        start = idaapi.cvar.inf.minEA

    if end in (None, idaapi.BADADDR):
        end = idaapi.cvar.inf.maxEA

    return start, end
Set missing addresses to the start and end of the IDB.

Takes start and end addresses. If an address is None or `BADADDR`,
returns the start or end address of the IDB instead.

Args:
    start: Start EA. Use `None` to get IDB start.
    end:  End EA. Use `None` to get IDB end.

Returns:
    (start, end)
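Typical call patterns, shown as comments because they need a live IDB to run:

# start, end = fix_addresses()               # the whole IDB range
# start, end = fix_addresses(0x401000)       # from 0x401000 to the IDB end
# start, end = fix_addresses(end=0x402000)   # from the IDB start to 0x402000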
def resolve_path_from_base(path_to_resolve, base_path):
    """
    If path_to_resolve is a relative path, create an absolute path
    with base_path as the base.

    If path_to_resolve is an absolute path or a user path (~), just
    resolve it to an absolute path and return.
    """
    return os.path.abspath(
        os.path.join(
            base_path,
            os.path.expanduser(path_to_resolve)))
If path_to_resolve is a relative path, create an absolute path
with base_path as the base.

If path_to_resolve is an absolute path or a user path (~), just
resolve it to an absolute path and return.
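A quick runnable check of the three cases described, with illustrative paths:

base = '/opt/app'
print(resolve_path_from_base('data/f.txt', base))   # -> /opt/app/data/f.txt
print(resolve_path_from_base('/etc/hosts', base))   # -> /etc/hosts (absolute wins)
print(resolve_path_from_base('~/notes.txt', base))  # -> expanded under $HOME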
def get_list(self): """ Extract from a DSL aggregated response the values for each bucket :return: a list with the values in a DSL aggregated response """ field = self.FIELD_NAME query = ElasticQuery.get_agg(field=field, date_field=self.FIELD_DATE, start=self.start, end=self.end, filters=self.esfilters) logger.debug("Metric: '%s' (%s); Query: %s", self.name, self.id, query) res = self.get_metrics_data(query) list_ = {field: [], "value": []} for bucket in res['aggregations'][str(ElasticQuery.AGGREGATION_ID)]['buckets']: list_[field].append(bucket['key']) list_['value'].append(bucket['doc_count']) return list_
Extract from a DSL aggregated response the values for each bucket :return: a list with the values in a DSL aggregated response
def essays(self): """Copy essays from the source profile to the destination profile.""" for essay_name in self.dest_user.profile.essays.essay_names: setattr(self.dest_user.profile.essays, essay_name, getattr(self.source_profile.essays, essay_name))
Copy essays from the source profile to the destination profile.
def adjgraph(args): """ %prog adjgraph adjacency.txt subgraph.txt Construct adjacency graph for graphviz. The file may look like sample below. The lines with numbers are chromosomes with gene order information. genome 0 chr 0 -1 -13 -16 3 4 -6126 -5 17 -6 7 18 5357 8 -5358 5359 -9 -10 -11 5362 5360 chr 1 138 6133 -5387 144 -6132 -139 140 141 146 -147 6134 145 -170 -142 -143 """ import pygraphviz as pgv from jcvi.utils.iter import pairwise from jcvi.formats.base import SetFile p = OptionParser(adjgraph.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) infile, subgraph = args subgraph = SetFile(subgraph) subgraph = set(x.strip("-") for x in subgraph) G = pgv.AGraph(strict=False) # allow multi-edge SG = pgv.AGraph(strict=False) palette = ("green", "magenta", "tomato", "peachpuff") fp = open(infile) genome_id = -1 key = 0 for row in fp: if row.strip() == "": continue atoms = row.split() tag = atoms[0] if tag in ("ChrNumber", "chr"): continue if tag == "genome": genome_id += 1 gcolor = palette[genome_id] continue nodeseq = [] for p in atoms: np = p.strip("-") nodeL, nodeR = np + "L", np + "R" if p[0] == "-": # negative strand nodeseq += [nodeR, nodeL] else: nodeseq += [nodeL, nodeR] for a, b in pairwise(nodeseq): G.add_edge(a, b, key, color=gcolor) key += 1 na, nb = a[:-1], b[:-1] if na not in subgraph and nb not in subgraph: continue SG.add_edge(a, b, key, color=gcolor) G.graph_attr.update(dpi="300") fw = open("graph.dot", "w") G.write(fw) fw.close() fw = open("subgraph.dot", "w") SG.write(fw) fw.close()
%prog adjgraph adjacency.txt subgraph.txt Construct adjacency graph for graphviz. The file may look like sample below. The lines with numbers are chromosomes with gene order information. genome 0 chr 0 -1 -13 -16 3 4 -6126 -5 17 -6 7 18 5357 8 -5358 5359 -9 -10 -11 5362 5360 chr 1 138 6133 -5387 144 -6132 -139 140 141 146 -147 6134 145 -170 -142 -143
def _vpcs_path(self):
    """
    Returns the VPCS executable path.

    :returns: path to VPCS
    """

    search_path = self._manager.config.get_section_config("VPCS").get("vpcs_path", "vpcs")
    path = shutil.which(search_path)
    # shutil.which returns None if the path doesn't exist
    if not path:
        return search_path
    return path
Returns the VPCS executable path. :returns: path to VPCS
def show_vcs_output_vcs_nodes_vcs_node_info_node_switchname(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_vcs = ET.Element("show_vcs") config = show_vcs output = ET.SubElement(show_vcs, "output") vcs_nodes = ET.SubElement(output, "vcs-nodes") vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info") node_switchname = ET.SubElement(vcs_node_info, "node-switchname") node_switchname.text = kwargs.pop('node_switchname') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def tag(**tags): """ Tags current transaction. Both key and value of the tag should be strings. """ transaction = execution_context.get_transaction() if not transaction: error_logger.warning("Ignored tags %s. No transaction currently active.", ", ".join(tags.keys())) else: transaction.tag(**tags)
Tags current transaction. Both key and value of the tag should be strings.
def intake_path_dirs(path):
    """Return a list of directories from the intake path.

    If a string, perhaps taken from an environment variable, then the
    list of paths will be split on the character ":" for posix or ";"
    for windows. Protocol indicators ("protocol://") will be ignored.
    """
    if isinstance(path, (list, tuple)):
        return path
    import re
    pattern = re.compile(";" if os.name == 'nt' else r"(?<!:):(?![:/])")
    return pattern.split(path)
Return a list of directories from the intake path.

If a string, perhaps taken from an environment variable, then the
list of paths will be split on the character ":" for posix or ";"
for windows. Protocol indicators ("protocol://") will be ignored.
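A couple of hand-checked cases on posix (os.name != 'nt'), where the lookaround regex skips colons that are part of "://" sequences:

intake_path_dirs('dir1:dir2')
# -> ['dir1', 'dir2']
intake_path_dirs('intake://cat1:intake://cat2')
# -> ['intake://cat1', 'intake://cat2']  (the '://' colons are not split points)
intake_path_dirs(['already', 'a', 'list'])
# -> ['already', 'a', 'list']  (lists and tuples are returned unchanged)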
def toProtocolElement(self): """ Returns the GA4GH protocol representation of this ReferenceSet. """ ret = protocol.ReferenceSet() ret.assembly_id = pb.string(self.getAssemblyId()) ret.description = pb.string(self.getDescription()) ret.id = self.getId() ret.is_derived = self.getIsDerived() ret.md5checksum = self.getMd5Checksum() if self.getSpecies(): term = protocol.fromJson( json.dumps(self.getSpecies()), protocol.OntologyTerm) ret.species.term_id = term.term_id ret.species.term = term.term ret.source_accessions.extend(self.getSourceAccessions()) ret.source_uri = pb.string(self.getSourceUri()) ret.name = self.getLocalId() self.serializeAttributes(ret) return ret
Returns the GA4GH protocol representation of this ReferenceSet.
def send_email(sender, pw, to, subject, content, files=None, service='163'): """send email, recommended use 163 mailbox service, as it is tested. :param sender: str email address of sender :param pw: str password for sender :param to: str email addressee :param subject: str subject of email :param content: str content of email :param files: list path list of attachments :param service: str smtp server address, optional is ['163', 'qq'] :return: None """ se = EmailSender(from_=sender, pw=pw, service=service) se.send_email(to=to, subject=subject, content=content, files=files) se.quit()
send email, recommended use 163 mailbox service, as it is tested. :param sender: str email address of sender :param pw: str password for sender :param to: str email addressee :param subject: str subject of email :param content: str content of email :param files: list path list of attachments :param service: str smtp server address, optional is ['163', 'qq'] :return: None
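Hypothetical usage; every address, password, and filename below is a placeholder:

# send_email(sender='[email protected]', pw='app-password',
#            to='[email protected]', subject='weekly report',
#            content='see attachment', files=['report.pdf'], service='163')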
def order_modified_volume(self, volume_id, new_size=None, new_iops=None, new_tier_level=None): """Places an order for modifying an existing block volume. :param volume_id: The ID of the volume to be modified :param new_size: The new size/capacity for the volume :param new_iops: The new IOPS for the volume :param new_tier_level: The new tier level for the volume :return: Returns a SoftLayer_Container_Product_Order_Receipt """ mask_items = [ 'id', 'billingItem', 'storageType[keyName]', 'capacityGb', 'provisionedIops', 'storageTierLevel', 'staasVersion', 'hasEncryptionAtRest', ] block_mask = ','.join(mask_items) volume = self.get_block_volume_details(volume_id, mask=block_mask) order = storage_utils.prepare_modify_order_object( self, volume, new_iops, new_tier_level, new_size ) return self.client.call('Product_Order', 'placeOrder', order)
Places an order for modifying an existing block volume. :param volume_id: The ID of the volume to be modified :param new_size: The new size/capacity for the volume :param new_iops: The new IOPS for the volume :param new_tier_level: The new tier level for the volume :return: Returns a SoftLayer_Container_Product_Order_Receipt
def unregister_callback(callback_id): """unregister a callback registration""" global _callbacks obj = _callbacks.pop(callback_id, None) threads = [] if obj is not None: t, quit = obj quit.set() threads.append(t) for t in threads: t.join()
unregister a callback registration
def show_image(img:Image, ax:plt.Axes=None, figsize:tuple=(3,3), hide_axis:bool=True, cmap:str='binary', alpha:float=None, **kwargs)->plt.Axes: "Display `Image` in notebook." if ax is None: fig,ax = plt.subplots(figsize=figsize) ax.imshow(image2np(img.data), cmap=cmap, alpha=alpha, **kwargs) if hide_axis: ax.axis('off') return ax
Display `Image` in notebook.
def accept(self, visitor: "BaseVisitor[ResultT]") -> ResultT: """ Traverses the game in PGN order using the given *visitor*. Returns the *visitor* result. """ if visitor.begin_game() is not SKIP: for tagname, tagvalue in self.headers.items(): visitor.visit_header(tagname, tagvalue) if visitor.end_headers() is not SKIP: board = self.board() visitor.visit_board(board) if self.comment: visitor.visit_comment(self.comment) if self.variations: self.variations[0].accept(visitor, _parent_board=board) visitor.visit_result(self.headers.get("Result", "*")) visitor.end_game() return visitor.result()
Traverses the game in PGN order using the given *visitor*. Returns the *visitor* result.
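Hypothetical usage with one of python-chess's exporter visitors (the StringExporter name and keyword arguments are assumed from the library's pgn module):

# import chess.pgn
# exporter = chess.pgn.StringExporter(headers=True, variations=True, comments=True)
# pgn_text = game.accept(exporter)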
def _scan_response(self): """Create scan response data.""" voltage = struct.pack("<H", int(self.voltage*256)) reading = struct.pack("<HLLL", 0xFFFF, 0, 0, 0) response = voltage + reading return response
Create scan response data.
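The payload is a fixed 16 bytes: a little-endian uint16 voltage in units of 1/256 V, followed by a 14-byte placeholder reading. A hand-checked packing example:

import struct

# A voltage of 3.8 V becomes int(3.8 * 256) == 972 == 0x03CC,
# packed little-endian as b'\xcc\x03'.
assert struct.pack("<H", int(3.8 * 256)) == b'\xcc\x03'
# The placeholder reading is 2 + 4 + 4 + 4 == 14 bytes of "<HLLL".
assert len(struct.pack("<HLLL", 0xFFFF, 0, 0, 0)) == 14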
def get_secret(key, *args, **kwargs): """Retrieves a secret.""" env_value = os.environ.get(key.replace('.', '_').upper()) if not env_value: # Backwards compatibility: the deprecated secrets vault return _get_secret_from_vault(key, *args, **kwargs) return env_value
Retrieves a secret.
def rentry_exists_on_disk(self, name): """ Searches through the file/dir entries of the current *and* all its remote directories (repos), and returns True if a physical entry with the given name could be found. The local directory (self) gets searched first, so repositories take a lower precedence regarding the searching order. @see entry_exists_on_disk """ rentry_exists = self.entry_exists_on_disk(name) if not rentry_exists: # Search through the repository folders norm_name = _my_normcase(name) for rdir in self.get_all_rdirs(): try: node = rdir.entries[norm_name] if node: rentry_exists = True break except KeyError: if rdir.entry_exists_on_disk(name): rentry_exists = True break return rentry_exists
Searches through the file/dir entries of the current *and* all its remote directories (repos), and returns True if a physical entry with the given name could be found. The local directory (self) gets searched first, so repositories take a lower precedence regarding the searching order. @see entry_exists_on_disk
def token_submit(self, token_id, json_data=None):
    """ Submits a given token, along with optional data """
    # Avoid a mutable default argument; fall back to an empty payload.
    if json_data is None:
        json_data = {}
    uri = 'tokens/%s' % token_id
    post_body = json.dumps(json_data)
    resp, body = self.post(uri, post_body)
    self.expected_success(200, resp.status)
    body = json.loads(body)
    return rest_client.ResponseBody(resp, body)
Submits a given token, along with optional data
def _get_ldflags(): """Determine the correct link flags. This attempts dummy compiles similar to how autotools does feature detection. """ # windows gcc does not support linking with unresolved symbols if sys.platform == 'win32': # pragma: no cover (windows) prefix = getattr(sys, 'real_prefix', sys.prefix) libs = os.path.join(prefix, str('libs')) return str('-L{} -lpython{}{}').format(libs, *sys.version_info[:2]) cc = subprocess.check_output(('go', 'env', 'CC')).decode('UTF-8').strip() with _tmpdir() as tmpdir: testf = os.path.join(tmpdir, 'test.c') with io.open(testf, 'w') as f: f.write('int f(int); int main(void) { return f(0); }\n') for lflag in LFLAGS: # pragma: no cover (platform specific) try: subprocess.check_call((cc, testf, lflag), cwd=tmpdir) return lflag except subprocess.CalledProcessError: pass else: # pragma: no cover (platform specific) # wellp, none of them worked, fall back to gcc and they'll get a # hopefully reasonable error message return LFLAG_GCC
Determine the correct link flags. This attempts dummy compiles similar to how autotools does feature detection.
def has_port_by_name(self, port_name): '''Check if this component has a port by the given name.''' with self._mutex: if self.get_port_by_name(port_name): return True return False
Check if this component has a port by the given name.
def uninitialize_ui(self): """ Uninitializes the Component ui. :return: Method success. :rtype: bool """ LOGGER.debug("> Uninitializing '{0}' Component ui.".format(self.__class__.__name__)) # Signals / Slots. self.Port_spinBox.valueChanged.disconnect(self.__Port_spinBox__valueChanged) self.Autostart_TCP_Server_checkBox.stateChanged.disconnect( self.__Autostart_TCP_Server_checkBox__stateChanged) self.Start_TCP_Server_pushButton.clicked.disconnect(self.__Start_TCP_Server_pushButton__clicked) self.Stop_TCP_Server_pushButton.clicked.disconnect(self.__Stop_TCP_Server_pushButton__clicked) self.initialized_ui = False return True
Uninitializes the Component ui. :return: Method success. :rtype: bool
def is_github_task(task): """Determine if a task is related to GitHub. This function currently looks into the ``schedulerId``, ``extra.tasks_for``, and ``metadata.source``. Args: task (dict): the task definition to check. Returns: bool: True if a piece of data refers to GitHub """ return any(( # XXX Cron tasks don't usually define 'taskcluster-github' as their schedulerId as they # are scheduled within another Taskcluster task. task.get('schedulerId') == 'taskcluster-github', # XXX Same here, cron tasks don't start with github task.get('extra', {}).get('tasks_for', '').startswith('github-'), is_github_url(task.get('metadata', {}).get('source', '')), ))
Determine if a task is related to GitHub. This function currently looks into the ``schedulerId``, ``extra.tasks_for``, and ``metadata.source``. Args: task (dict): the task definition to check. Returns: bool: True if a piece of data refers to GitHub
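A few illustrative task stubs exercising each branch; the field values are invented, and the last check additionally assumes is_github_url returns a falsy value for an empty source string:

assert is_github_task({'schedulerId': 'taskcluster-github'})
assert is_github_task({'extra': {'tasks_for': 'github-push'}})
assert not is_github_task({'schedulerId': 'gecko-level-3'})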
def list_dir(self, filter_fn=None) -> 'Tuple[Path, ...]':
    """
    * the `self` Path object is assumed to be a directory

    :param filter_fn: a `None` object or a predicate function
        `str -> bool` which will be applied to each filename/directory
        name in `self` directory.

    :return: a tuple of Path objects, each of which represents a
        file/directory in `self` directory. If `filter_fn` is not None,
        any entry whose filename/directory name doesn't satisfy
        `filter_fn` will be filtered out of the returned tuple.

    e.g:
        - Dir1
            - File.py
            - File.pyi
            - File.pyx

        Dir1.list_dir(lambda path: path.endswith('.py'))
        => (<Path object of File.py>,)

        Dir1.list_dir(lambda path: print(path))
        IO:
            File.py
            File.pyi
            File.pyx
        => ()
    """
    path = str(self)
    items = os.listdir(path)
    if filter_fn is not None:
        items = filter(filter_fn, items)
    return tuple(Path(path_join((path, item))) for item in items)
* the `self` Path object is assumed to be a directory

:param filter_fn: a `None` object or a predicate function
    `str -> bool` which will be applied to each filename/directory
    name in `self` directory.

:return: a tuple of Path objects, each of which represents a
    file/directory in `self` directory. If `filter_fn` is not None,
    any entry whose filename/directory name doesn't satisfy
    `filter_fn` will be filtered out of the returned tuple.

e.g:
    - Dir1
        - File.py
        - File.pyi
        - File.pyx

    Dir1.list_dir(lambda path: path.endswith('.py'))
    => (<Path object of File.py>,)

    Dir1.list_dir(lambda path: print(path))
    IO:
        File.py
        File.pyi
        File.pyx
    => ()
def meanOmega(self,dangle,oned=False,tdisrupt=None,approx=True, higherorder=None): """ NAME: meanOmega PURPOSE: calculate the mean frequency as a function of angle, assuming a uniform time distribution up to a maximum time INPUT: dangle - angle offset oned= (False) if True, return the 1D offset from the progenitor (along the direction of disruption) approx= (True) if True, compute the mean Omega by direct integration of the spline representation higherorder= (object-wide default higherorderTrack) if True, include higher-order spline terms in the approximate computation OUTPUT: mean Omega HISTORY: 2015-11-17 - Written - Bovy (UofT) """ if higherorder is None: higherorder= self._higherorderTrack if tdisrupt is None: tdisrupt= self._tdisrupt if approx: num= self._meanOmega_num_approx(dangle,tdisrupt, higherorder=higherorder) else: num=\ integrate.quad(lambda T: (T/(1-T*T)\ *numpy.sqrt(self._sortedSigOEig[2])\ +self._meandO)\ *numpy.sqrt(self._sortedSigOEig[2])\ *(1+T*T)/(1-T*T)**2.\ *self.pOparapar(T/(1-T*T)\ *numpy.sqrt(self._sortedSigOEig[2])\ +self._meandO,dangle), -1.,1.)[0] denom= self._density_par(dangle,tdisrupt=tdisrupt,approx=approx, higherorder=higherorder) dO1D= num/denom if oned: return dO1D else: return self._progenitor_Omega+dO1D*self._dsigomeanProgDirection\ *self._sigMeanSign
NAME: meanOmega PURPOSE: calculate the mean frequency as a function of angle, assuming a uniform time distribution up to a maximum time INPUT: dangle - angle offset oned= (False) if True, return the 1D offset from the progenitor (along the direction of disruption) approx= (True) if True, compute the mean Omega by direct integration of the spline representation higherorder= (object-wide default higherorderTrack) if True, include higher-order spline terms in the approximate computation OUTPUT: mean Omega HISTORY: 2015-11-17 - Written - Bovy (UofT)
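Reading the non-approximate branch off the code: the 1D offset is the density-weighted mean of the parallel frequency offset, evaluated with a substitution that maps T in (-1, 1) onto the whole real line. Here sigma_Omega is the square root of the largest eigenvalue of the frequency dispersion and <Delta Omega> the mean frequency offset, matching the variables in the integrand:

\Delta\Omega_{1\mathrm{D}}(\Delta\theta_\parallel)
  = \frac{\int \mathrm{d}\Omega_\parallel\,\Omega_\parallel\,
          p(\Omega_\parallel \mid \Delta\theta_\parallel)}
         {\int \mathrm{d}\Omega_\parallel\,
          p(\Omega_\parallel \mid \Delta\theta_\parallel)},
\qquad
\Omega_\parallel = \frac{T}{1 - T^2}\,\sigma_\Omega + \langle\Delta\Omega\rangle,
\qquad
\frac{\mathrm{d}\Omega_\parallel}{\mathrm{d}T}
  = \sigma_\Omega\,\frac{1 + T^2}{(1 - T^2)^2}.

The returned mean frequency is then the progenitor frequency plus this 1D offset projected along the disruption direction (with the sign convention applied), matching the final return statement.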