def find_session(self, session_name):
    """Finds guest sessions by their friendly name and returns an interface
    array with all found guest sessions.

    in session_name of type str
        The session's friendly name to find. Wildcards like ? and * are allowed.

    return sessions of type :class:`IGuestSession`
        Array with all guest sessions found matching the name specified.
    """
    if not isinstance(session_name, basestring):
        raise TypeError("session_name can only be an instance of type basestring")
    sessions = self._call("findSession", in_p=[session_name])
    sessions = [IGuestSession(a) for a in sessions]
    return sessions
def _convert_old_schema(self, parameters):
    """
    Convert an ugly old schema, using dotted names, to the hot new schema,
    using List and Structure.

    The old schema assumes that every other dot implies an array. So a
    list of two parameters,

        [Integer("foo.bar.baz.quux"), Integer("foo.bar.shimmy")]

    becomes::

        [List(
            "foo",
            item=Structure(
                fields={"baz": List(item=Integer()),
                        "shimmy": Integer()}))]

    By design, the old schema syntax ignored the names "bar" and "quux".
    """
    # 'merged' here is an associative list that maps parameter names to
    # Parameter instances, OR sub-associative lists which represent nested
    # lists and structures.
    # e.g.,
    #   [Integer("foo")]
    # becomes
    #   [("foo", Integer("foo"))]
    # and
    #   [Integer("foo.bar")]
    # (which represents a list of integers called "foo" with a meaningless
    # index name of "bar") becomes
    #   [("foo", [("bar", Integer("foo.bar"))])].
    merged = []
    for parameter in parameters:
        segments = parameter.name.split('.')
        _merge_associative_list(merged, segments, parameter)
    result = [self._inner_convert_old_schema(node, 1) for node in merged]
    return result
def canonical_new_peer_list(self, peers_to_add):
    """
    Make a list of canonical new peers, using self.new_peers and the
    given peers to add.

    Return a shuffled list of canonicalized host:port strings.
    """
    new_peers = list(set(self.new_peers + peers_to_add))
    random.shuffle(new_peers)

    # canonicalize
    tmp = []
    for peer in new_peers:
        tmp.append(self.canonical_peer(peer))

    new_peers = tmp

    # don't talk to myself
    if self.my_hostport in new_peers:
        new_peers.remove(self.my_hostport)

    return new_peers
def to_unix_ts(start_time):
    """Given a datetime object, returns its value as a unix timestamp"""
    if isinstance(start_time, datetime):
        if is_timezone_aware(start_time):
            start_time = start_time.astimezone(pytz.utc)
        else:
            log.warning(
                "Non timezone-aware datetime object passed to "
                "IncrementalEndpoint. The Zendesk API expects UTC time, "
                "if this is not the case results will be incorrect!"
            )
        unix_time = calendar.timegm(start_time.timetuple())
    else:
        unix_time = start_time
    return int(unix_time)
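A minimal, self-contained sketch of the same UTC-normalizing conversion, assuming only the standard library plus pytz; the function above additionally relies on module-level helpers (is_timezone_aware, log) that are not shown here:

import calendar
from datetime import datetime

import pytz

# An aware datetime is first normalized to UTC, then converted with timegm.
aware = pytz.timezone('US/Eastern').localize(datetime(2020, 1, 1, 12, 0))
utc = aware.astimezone(pytz.utc)
print(int(calendar.timegm(utc.timetuple())))  # 1577898000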
def get_target(self):
    """
    Reads the android target based on project.properties file.

    Returns
        A string containing the project target (android-23 being the
        default if none is found)
    """
    with open('%s/project.properties' % self.path) as f:
        for line in f.readlines():
            matches = re.findall(r'^target=(.*)', line)
            if len(matches) == 0:
                continue
            return matches[0].replace('\n', '')
    return 'android-%s' % (config.sdk_version)
def event_later(self, delay, data_tuple):
    """
    Schedule an event to be emitted after a delay.

    :param delay: number of seconds
    :param data_tuple: a 2-tuple (flavor, data)
    :return: an event object, useful for cancelling.
    """
    return self._base.event_later(delay, self.make_event_data(*data_tuple))
def trash_for(self, user):
    """
    Returns all messages that were either received or sent by the given
    user and are marked as deleted.
    """
    return self.filter(
        recipient=user,
        recipient_deleted_at__isnull=False,
    ) | self.filter(
        sender=user,
        sender_deleted_at__isnull=False,
    )
def remove_global_exception_handler(handler):
    """remove a callback from the list of global exception handlers

    :param handler:
        the callback, previously added via :func:`global_exception_handler`,
        to remove
    :type handler: function

    :returns: bool, whether the handler was found (and therefore removed)
    """
    for i, cb in enumerate(state.global_exception_handlers):
        cb = cb()  # dereference; yields the stored callback or None if dead
        if cb is not None and cb is handler:
            state.global_exception_handlers.pop(i)
            log.info("removing a global exception handler")
            return True
    return False
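The `cb = cb()` dereference suggests the handlers are stored as weak references; a self-contained sketch of that add/remove pattern (the names below are illustrative, not this library's API):

import weakref

global_exception_handlers = []

def add_handler(handler):
    # Hold the handler weakly so registration does not keep it alive.
    global_exception_handlers.append(weakref.ref(handler))

def remove_handler(handler):
    for i, ref in enumerate(global_exception_handlers):
        if ref() is handler:
            global_exception_handlers.pop(i)
            return True
    return False

def on_error(exc):  # illustrative handler
    print('handled:', exc)

add_handler(on_error)
print(remove_handler(on_error))  # True
print(remove_handler(on_error))  # False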
def emit(_):
    """Serialize metrics to the memory mapped buffer."""
    if not initialized:
        raise NotInitialized

    view = {
        'version': __version__,
        'counters': {},
        'gauges': {},
        'histograms': {},
        'meters': {},
        'timers': {},
    }

    for (ty, module, name), metric in six.iteritems(all_metrics):
        view[ty]['%s.%s' % (module, name)] = metric.view()

    marshalled_view = marshal.dumps(view)
    if len(marshalled_view) > MAX_MARSHALLED_VIEW_SIZE:
        log.warn(
            'Marshalled length too large, got %d, max %d. '
            'Try recording fewer metrics or increasing '
            'MAX_MARSHALLED_VIEW_SIZE' % (
                len(marshalled_view), MAX_MARSHALLED_VIEW_SIZE))
        return

    marshalled_metrics_mmap.seek(0)
    try:
        # Reading and writing to/from an mmap'ed buffer is not guaranteed
        # to be atomic, so we must serialize access to it.
        uwsgi.lock()
        marshalled_metrics_mmap.write(marshalled_view)
    finally:
        uwsgi.unlock()
def refresh_save_all_action(self):
    """Enable 'Save All' if there are files to be saved"""
    editorstack = self.get_current_editorstack()
    if editorstack:
        state = any(finfo.editor.document().isModified()
                    or finfo.newly_created
                    for finfo in editorstack.data)
        self.save_all_action.setEnabled(state)
def codons(self, frame):
    """
    A generator that yields DNA in one-codon blocks.

    "frame" counts from 0. This function yields a tuple (triplet, index)
    with index relative to the original DNA sequence.
    """
    start = frame
    while start + 3 <= self.size:
        yield self.sequence[start:start + 3], start
        start += 3
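A stand-alone version of the same codon walk over a plain string, with the instance attributes (self.sequence, self.size) replaced by locals for illustration:

def codons(sequence, frame):
    # Yield (triplet, index) pairs, index relative to the full sequence.
    start = frame
    while start + 3 <= len(sequence):
        yield sequence[start:start + 3], start
        start += 3

print(list(codons("ATGGCGTAA", 0)))
# [('ATG', 0), ('GCG', 3), ('TAA', 6)]
print(list(codons("ATGGCGTAA", 1)))
# [('TGG', 1), ('CGT', 4)]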
def get_coord_system_name(header):
    """Return an appropriate key code for the axes coordinate system by
    examining the FITS header.
    """
    try:
        ctype = header['CTYPE1'].strip().upper()
    except KeyError:
        try:
            # see if we have an "RA" header
            ra = header['RA']  # noqa
            try:
                equinox = float(header['EQUINOX'])
                if equinox < 1984.0:
                    radecsys = 'FK4'
                else:
                    radecsys = 'FK5'
            except KeyError:
                radecsys = 'ICRS'
            return radecsys.lower()

        except KeyError:
            return 'raw'

    match = re.match(r'^GLON\-.*$', ctype)
    if match:
        return 'galactic'

    match = re.match(r'^ELON\-.*$', ctype)
    if match:
        return 'ecliptic'

    match = re.match(r'^RA\-\-\-.*$', ctype)
    if match:
        hdkey = 'RADECSYS'
        try:
            radecsys = header[hdkey]
        except KeyError:
            try:
                hdkey = 'RADESYS'
                radecsys = header[hdkey]
            except KeyError:
                # missing keyword
                # RADESYS defaults to ICRS unless EQUINOX is given
                # alone, in which case it defaults to FK4 prior to 1984
                # and FK5 after 1984.
                try:
                    equinox = float(header['EQUINOX'])
                    if equinox < 1984.0:
                        radecsys = 'FK4'
                    else:
                        radecsys = 'FK5'
                except KeyError:
                    radecsys = 'ICRS'

        radecsys = radecsys.strip()
        return radecsys.lower()

    match = re.match(r'^HPLN\-.*$', ctype)
    if match:
        return 'helioprojective'

    match = re.match(r'^HGLT\-.*$', ctype)
    if match:
        return 'heliographicstonyhurst'

    match = re.match(r'^PIXEL$', ctype)
    if match:
        return 'pixel'

    match = re.match(r'^LINEAR$', ctype)
    if match:
        return 'pixel'

    # raise WCSError("Cannot determine appropriate coordinate system from FITS header")  # noqa
    return 'icrs'
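Since the function only needs mapping-style access to the header, a plain dict can stand in for a FITS header when trying it out (assuming `re` is imported in the module, as the body implies):

print(get_coord_system_name({'CTYPE1': 'RA---TAN', 'RADESYS': 'FK5'}))  # fk5
print(get_coord_system_name({'CTYPE1': 'GLON-CAR'}))                    # galactic
print(get_coord_system_name({'RA': '12.5', 'EQUINOX': '1950.0'}))       # fk4
print(get_coord_system_name({}))                                        # raw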
def get_path_to_repo(self, repo: str) -> Path:
    """
    Returns a :class:`Path <pathlib.Path>` to the location where all the
    branches from this repo are stored.

    :param repo: Repo URL
    :return: Path to where branches from this repository are cloned.
    """
    return Path(self.base_dir) / "repos" / self.repo_id(repo)
def _infer_unknown_dims(old_shape, shape_spec):
    """Attempts to replace DIM_REST (if present) with a value.

    Because of `pt.DIM_SAME`, this has more information to compute a shape
    value than the default reshape's shape function.

    Args:
      old_shape: The current shape of the Tensor as a list.
      shape_spec: A shape spec, see `pt.reshape`.
    Returns:
      A list derived from `shape_spec` with `pt.DIM_SAME` replaced by the
      value from old_shape (if possible) and `pt.DIM_REST` computed
      (if possible).
    Raises:
      ValueError: If there are too many unknown dimensions or the shape_spec
        requires out of range DIM_SAME.
      TypeError: If shape_spec is not iterable.
    """
    # To compute the dimension of an unknown, we need to track which of the
    # values from old_shape are not copied for the numerator and any values
    # specified as integers for the denominator.
    #
    # After the loop, if any input dimension is unknown and not DIM_SAME, the
    # numerator will be 0. Otherwise it is the product of all non-DIM_SAME
    # dimensions. This means that the dimension of DIM_REST is
    # numerator / denominator
    numerator_elements = [x if x else 0 for x in old_shape]
    denominator = 1
    unknowns = 0

    normalized_shape_spec = []
    for s in shape_spec:
        # Equality of tf.Dimension is broken and upstream fix hasn't been
        # accepted.
        if isinstance(s, tf.Dimension):
            normalized_shape_spec.append(s.value)
        elif isinstance(s, tf.TensorShape):
            for dim in s:
                normalized_shape_spec.append(dim.value)
        else:
            normalized_shape_spec.append(s)

    result = []
    for i, s in enumerate(normalized_shape_spec):
        if s == DIM_SAME:
            if i >= len(old_shape):
                raise ValueError('%d exceeds the input shape' % i)
            if old_shape[i] is None:
                result.append(DIM_SAME)
            else:
                result.append(old_shape[i])
            numerator_elements[i] = 1
        elif s in (DIM_REST, -1, None):
            result.append(-1)
            unknowns += 1
        else:
            x = int(s)
            result.append(x)
            denominator *= x

    numerator = 1
    for x in numerator_elements:
        numerator *= x

    if unknowns > 1:
        raise ValueError('Only one unknown value (-1 or *) is allowed: %s' %
                         shape_spec)
    elif numerator % denominator != 0:
        raise ValueError('Input (%s) cannot be reshaped to %s.' %
                         (old_shape, shape_spec))
    elif unknowns == 0 and numerator > 0 and numerator != denominator:
        raise ValueError('Input (%s) cannot be reshaped to %s.' %
                         (old_shape, shape_spec))

    if numerator and unknowns:
        unknown_elements = int(numerator / denominator)
        return [unknown_elements if x == -1 else x for x in result]
    else:
        return result
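A worked instance of the numerator/denominator bookkeeping described in the leading comment, using plain integers (the pt.DIM_SAME/pt.DIM_REST handling is elided):

old_shape = [6, 4, 5]            # 120 elements in total
shape_spec = [3, -1, 10]         # exactly one unknown dimension
numerator = 6 * 4 * 5            # no DIM_SAME, so every input dim contributes
denominator = 3 * 10             # the explicitly given dims
print(numerator // denominator)  # 4 -> the inferred shape is [3, 4, 10]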
def syncTree(self, recursive=False, blockSignals=True):
    """
    Syncs the information from this item to the tree.
    """
    tree = self.treeWidget()

    # sync the tree information
    if not tree:
        return

    items = [self]
    if recursive:
        items += list(self.children(recursive=True))

    if blockSignals and not tree.signalsBlocked():
        blocked = True
        tree.blockSignals(True)
    else:
        blocked = False

    date_format = self.ganttWidget().dateFormat()

    for item in items:
        for c, col in enumerate(tree.columns()):
            value = item.property(col, '')
            item.setData(c, Qt.EditRole, wrapVariant(value))

    if blocked:
        tree.blockSignals(False)
def _replace_envvar(s, _):
    """env:KEY or env:KEY:DEFAULT"""
    e = s.split(":")
    if len(e) > 3 or len(e) == 1 or e[0] != "env":
        raise ValueError()
    elif len(e) == 2:
        # Note: this can/should raise a KeyError (according to spec).
        return os.environ[e[1]]
    else:  # len(e) == 3
        return os.environ.get(e[1], e[2])
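A quick check of the substitution behavior (note a DEFAULT containing ':' would itself raise ValueError, since the split is unbounded):

import os

os.environ['HOME_DIR'] = '/home/demo'
print(_replace_envvar('env:HOME_DIR', None))          # /home/demo
print(_replace_envvar('env:MISSING:fallback', None))  # fallback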
def branches(self):
    """Get basic block branches.
    """
    branches = []

    if self._taken_branch:
        branches += [(self._taken_branch, 'taken')]

    if self._not_taken_branch:
        branches += [(self._not_taken_branch, 'not-taken')]

    if self._direct_branch:
        branches += [(self._direct_branch, 'direct')]

    return branches
def extend_to_data(self, data, **kwargs):
    """Build transition matrix from new data to the graph

    Creates a transition matrix such that `Y` can be approximated by
    a linear combination of landmarks. Any transformation of the
    landmarks can be trivially applied to `Y` by performing
    `transform_Y = transitions.dot(transform)`

    Parameters
    ----------
    Y : array-like, [n_samples_y, n_features]
        new data for which an affinity matrix is calculated
        to the existing data. `n_features` must match
        either the ambient or PCA dimensions

    Returns
    -------
    transitions : array-like, [n_samples_y, self.data.shape[0]]
        Transition matrix from `Y` to `self.data`
    """
    kernel = self.build_kernel_to_data(data, **kwargs)
    if sparse.issparse(kernel):
        pnm = sparse.hstack(
            [sparse.csr_matrix(kernel[:, self.clusters == i].sum(
                axis=1)) for i in np.unique(self.clusters)])
    else:
        pnm = np.array([np.sum(
            kernel[:, self.clusters == i], axis=1).T
            for i in np.unique(self.clusters)]).transpose()
    pnm = normalize(pnm, norm='l1', axis=1)
    return pnm
def wrap_paragraphs(content, hard_breaks=False):
    """
    Returns *content* with all paragraphs wrapped in `<p>` tags.

    If *hard_breaks* is set, line breaks are converted to `<br />` tags.
    """
    paras = filter(None, [para.strip() for para in content.split('\n\n')])
    paras = [build_paragraph(para, hard_breaks) for para in paras]
    return '\n'.join(paras)
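A self-contained run of the same flow; `build_paragraph` is not shown in the source, so a minimal hypothetical stand-in is used here:

def build_paragraph(para, hard_breaks):
    # Hypothetical helper: wrap one paragraph, optionally hardening breaks.
    if hard_breaks:
        para = para.replace('\n', '<br />')
    return '<p>%s</p>' % para

text = "First paragraph.\n\nSecond\nparagraph."
print(wrap_paragraphs(text, hard_breaks=True))
# <p>First paragraph.</p>
# <p>Second<br />paragraph.</p>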
def compile_dir(dfn, optimize_python=True):
    '''
    Compile *.py in directory `dfn` to *.pyo
    '''
    if PYTHON is None:
        return

    if int(PYTHON_VERSION[0]) >= 3:
        args = [PYTHON, '-m', 'compileall', '-b', '-f', dfn]
    else:
        args = [PYTHON, '-m', 'compileall', '-f', dfn]
    if optimize_python:
        # -OO = strip docstrings
        args.insert(1, '-OO')
    return_code = subprocess.call(args)

    if return_code != 0:
        print('Error while running "{}"'.format(' '.join(args)))
        print('This probably means one of your Python files has a syntax '
              'error, see logs above')
        exit(1)
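For reference, the equivalent command the helper assembles on Python 3, runnable directly ('build/app' is a hypothetical target directory):

import subprocess
import sys

# -OO strips docstrings, -b writes the bytecode next to the sources,
# -f forces recompilation even if timestamps look current.
args = [sys.executable, '-OO', '-m', 'compileall', '-b', '-f', 'build/app']
subprocess.call(args)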
def mtf_image_transformer_base_imagenet_mp64():
    """Model parallel ImageNet parameters."""
    hparams = mtf_image_transformer_base_imagenet()
    hparams.mesh_shape = "model:8;batch:4"
    hparams.layout = "batch:batch;d_ff:model;heads:model"
    hparams.batch_size = 8
    hparams.img_len = 64
    hparams.num_decoder_layers = 8
    return hparams
def create_placeholder_access_object(self, instance):
    """
    Creates objects with placeholder slots as properties.

    Each placeholder created for an object will be added to a
    `PlaceHolderAccess` object as a set property.
    """
    related_model = self.related_model

    def get_related_model_objects(name):
        """
        Obtains the related model objects based upon the slot name.

        :param name: The slot name in string form.
        :returns: Related model contents if they exist or it will raise a
            `DoesNotExist` exception.
        """
        # Parent type, parent id and slot are set to be unique on the
        # default related model and therefore treated as such here.
        return related_model.objects.get(
            parent_type=ContentType.objects.get_for_model(type(instance)),
            parent_id=instance.id,
            slot=name,
        ).get_content_items()

    class PlaceholderAccess(object):
        def __getattribute__(self, name):
            """
            Allow placeholder contents to be accessed via slot name on the
            descriptor object.

            For example if the slot was named `main` and you had a
            descriptor named `slots` on an object named `page` you would
            call it by `page.slots.main`.

            If a slot name is used that does not exist an `AttributeError`
            will be raised. If you get this error for a slot that should
            exist, you may need to run `manage.py add_missing_placeholders`.
            """
            try:
                return get_related_model_objects(name)
            except related_model.DoesNotExist:
                return super(PlaceholderAccess, self).__getattribute__(name)

        def __getitem__(self, item):
            """
            Allow placeholder contents to be accessed via slot name on the
            descriptor object via a dictionary lookup.

            For example if the slot was named `main` and you had a
            descriptor named `slots` on an object named `page` you would
            call it by `page.slots['main']`.

            If a slot name is used that does not exist a `KeyError` will
            be raised.
            """
            try:
                return get_related_model_objects(item)
            except related_model.DoesNotExist:
                raise KeyError

    return PlaceholderAccess()
def initialize(config):
    """
    Initialize a connection to the Redis database.
    """
    # Determine the client class to use
    if 'redis_client' in config:
        client = utils.find_entrypoint('turnstile.redis_client',
                                       config['redis_client'], required=True)
    else:
        client = redis.StrictRedis

    # Extract relevant connection information from the configuration
    kwargs = {}
    for cfg_var, type_ in REDIS_CONFIGS.items():
        if cfg_var in config:
            kwargs[cfg_var] = type_(config[cfg_var])

    # Make sure we have at a minimum the hostname
    if 'host' not in kwargs and 'unix_socket_path' not in kwargs:
        raise redis.ConnectionError("No host specified for redis database")

    # Look up the connection pool configuration
    cpool_class = None
    cpool = {}
    extra_kwargs = {}
    for key, value in config.items():
        if key.startswith('connection_pool.'):
            _dummy, _sep, varname = key.partition('.')
            if varname == 'connection_class':
                cpool[varname] = utils.find_entrypoint(
                    'turnstile.connection_class', value, required=True)
            elif varname == 'max_connections':
                cpool[varname] = int(value)
            elif varname == 'parser_class':
                cpool[varname] = utils.find_entrypoint(
                    'turnstile.parser_class', value, required=True)
            else:
                cpool[varname] = value
        elif key not in REDIS_CONFIGS and key not in REDIS_EXCLUDES:
            extra_kwargs[key] = value
    if cpool:
        cpool_class = redis.ConnectionPool

    # Use custom connection pool class if requested...
    if 'connection_pool' in config:
        cpool_class = utils.find_entrypoint('turnstile.connection_pool',
                                            config['connection_pool'],
                                            required=True)

    # If we're using a connection pool, we'll need to pass the keyword
    # arguments to that instead of to redis
    if cpool_class:
        cpool.update(kwargs)

        # Use a custom connection class?
        if 'connection_class' not in cpool:
            if 'unix_socket_path' in cpool:
                if 'host' in cpool:
                    del cpool['host']
                if 'port' in cpool:
                    del cpool['port']

                cpool['path'] = cpool['unix_socket_path']
                del cpool['unix_socket_path']

                cpool['connection_class'] = redis.UnixDomainSocketConnection
            else:
                cpool['connection_class'] = redis.Connection

        # Build the connection pool to use and set up to pass it into
        # the redis constructor...
        kwargs = dict(connection_pool=cpool_class(**cpool))

    # Build and return the database
    kwargs.update(extra_kwargs)
    return client(**kwargs)
def sheets(self):
    """return the sheets of data."""
    data = Dict()
    for src in [src for src in self.zipfile.namelist()
                if 'xl/worksheets/' in src]:
        name = os.path.splitext(os.path.basename(src))[0]
        xml = self.xml(src)
        data[name] = xml
    return data
def main(args, parser, subparser=None):
    '''this is the main entrypoint for a container-based web server, with
    most of the variables coming from the environment. See the Dockerfile
    template for how this function is executed.
    '''
    # First priority to args.base
    base = args.base
    if base is None:
        base = os.environ.get('EXPFACTORY_BASE')

    # Does the base folder exist?
    if base is None:
        bot.error("You must set a base of experiments with --base")
        sys.exit(1)

    if not os.path.exists(base):
        bot.error("Base folder %s does not exist." % base)
        sys.exit(1)

    # Export environment variables for the client
    experiments = args.experiments
    if experiments is None:
        experiments = " ".join(glob("%s/*" % base))

    os.environ['EXPFACTORY_EXPERIMENTS'] = experiments

    # If defined and file exists, set runtime variables
    if args.vars is not None:
        if os.path.exists(args.vars):
            os.environ['EXPFACTORY_RUNTIME_VARS'] = args.vars
            os.environ['EXPFACTORY_RUNTIME_DELIM'] = args.delim
        else:
            bot.warning('Variables file %s not found.' % args.vars)

    subid = os.environ.get('EXPFACTORY_STUDY_ID')
    if args.subid is not None:
        subid = args.subid
        os.environ['EXPFACTORY_SUBID'] = subid

    os.environ['EXPFACTORY_RANDOM'] = str(args.disable_randomize)
    os.environ['EXPFACTORY_BASE'] = base

    from expfactory.server import start
    start(port=5000)
def get_all(self, sort_order=None, sort_target='key'):
    """Get all keys currently stored in etcd.

    :returns: sequence of (value, metadata) tuples
    """
    return self.get(
        key=_encode(b'\0'),
        metadata=True,
        sort_order=sort_order,
        sort_target=sort_target,
        range_end=_encode(b'\0'),
    )
def PartialDynamicSystem(self, ieq, variable):
    """
    returns dynamical system blocks associated to output variable
    """
    if ieq == 0:
        # U2 - U1 = signal
        if variable == self.physical_nodes[0].variable:
            # U1 is output
            # U1 = U2 - signal
            return [WeightedSum([self.physical_nodes[1].variable,
                                 self.voltage_signal],
                                variable, [1, -1])]
        elif variable == self.physical_nodes[1].variable:
            # U2 is output
            # U2 = U1 + signal
            return [WeightedSum([self.physical_nodes[0].variable,
                                 self.voltage_signal],
                                variable, [1, 1])]
def get_path_for_termid(self, termid):
    """
    This function returns the path (in terms of phrase types) from one
    term to the root.

    @type termid: string
    @param termid: one term id
    @rtype: list
    @return: the path, list of phrase types
    """
    terminal_id = self.terminal_for_term.get(termid)
    paths = self.paths_for_terminal[terminal_id]
    labels = [self.label_for_nonter[nonter] for nonter in paths[0]]
    return labels
def find_path(network, pore_pairs, weights=None):
    r"""
    Find the shortest path between pairs of pores.

    Parameters
    ----------
    network : OpenPNM Network Object
        The Network object on which the search should be performed
    pore_pairs : array_like
        An N x 2 array containing N pairs of pores for which the shortest
        path is sought.
    weights : array_like, optional
        An Nt-long list of throat weights for the search.  Typically this
        would be the throat lengths, but could also be used to represent
        the phase configuration.  If no weights are given then the standard
        topological connections of the Network are used.

    Returns
    -------
    A dictionary containing both the pores and throats that define the
    shortest path connecting each pair of input pores.

    Notes
    -----
    The shortest path is found using Dijkstra's algorithm included in the
    scipy.sparse.csgraph module

    TODO: The returned throat path contains the correct values, but not
    necessarily in the true order

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[3, 3, 3])
    >>> a = op.topotools.find_path(network=pn, pore_pairs=[[0, 4], [0, 10]])
    >>> a['pores']
    [array([0, 1, 4]), array([ 0,  1, 10])]
    >>> a['throats']
    [array([ 0, 19]), array([ 0, 37])]
    """
    Ps = sp.array(pore_pairs, ndmin=2)
    if weights is None:
        weights = sp.ones_like(network.Ts)
    graph = network.create_adjacency_matrix(weights=weights, fmt='csr',
                                            drop_zeros=False)
    paths = csgraph.dijkstra(csgraph=graph, indices=Ps[:, 0],
                             return_predecessors=True)[1]
    pores = []
    throats = []
    for row in range(0, sp.shape(Ps)[0]):
        j = Ps[row][1]
        ans = []
        while paths[row][j] > -9999:
            ans.append(j)
            j = paths[row][j]
        ans.append(Ps[row][0])
        ans.reverse()
        pores.append(sp.array(ans, dtype=int))
        Ts = network.find_neighbor_throats(pores=ans, mode='xnor')
        throats.append(sp.array(Ts, dtype=int))
    pdict = PrintableDict
    dict_ = pdict(**{'pores': pores, 'throats': throats})
    return dict_
r""" Find the shortest path between pairs of pores. Parameters ---------- network : OpenPNM Network Object The Network object on which the search should be performed pore_pairs : array_like An N x 2 array containing N pairs of pores for which the shortest path is sought. weights : array_like, optional An Nt-long list of throat weights for the search. Typically this would be the throat lengths, but could also be used to represent the phase configuration. If no weights are given then the standard topological connections of the Network are used. Returns ------- A dictionary containing both the pores and throats that define the shortest path connecting each pair of input pores. Notes ----- The shortest path is found using Dijkstra's algorithm included in the scipy.sparse.csgraph module TODO: The returned throat path contains the correct values, but not necessarily in the true order Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[3, 3, 3]) >>> a = op.topotools.find_path(network=pn, pore_pairs=[[0, 4], [0, 10]]) >>> a['pores'] [array([0, 1, 4]), array([ 0, 1, 10])] >>> a['throats'] [array([ 0, 19]), array([ 0, 37])]
def blob_services(self):
    """Instance depends on the API version:

    * 2018-07-01: :class:`BlobServicesOperations<azure.mgmt.storage.v2018_07_01.operations.BlobServicesOperations>`
    """
    api_version = self._get_api_version('blob_services')
    if api_version == '2018-07-01':
        from .v2018_07_01.operations import BlobServicesOperations as OperationClass
    else:
        raise NotImplementedError("APIVersion {} is not available".format(api_version))
    return OperationClass(self._client, self.config,
                          Serializer(self._models_dict(api_version)),
                          Deserializer(self._models_dict(api_version)))
def configure_interface(self, name, commands):
    """Configures the specified interface with the commands

    Args:
        name (str): The interface name to configure
        commands: The commands to configure in the interface

    Returns:
        True if the commands completed successfully
    """
    commands = make_iterable(commands)
    commands.insert(0, 'interface %s' % name)
    return self.configure(commands)
def _init_sbc_config(self, config):
    """
    Translator from namedtuple config representation to the sbc_t type.

    :param namedtuple config: See :py:class:`.SBCCodecConfig`
    :returns:
    """
    if config.channel_mode == SBCChannelMode.CHANNEL_MODE_MONO:
        self.config.mode = self.codec.SBC_MODE_MONO
    elif config.channel_mode == SBCChannelMode.CHANNEL_MODE_STEREO:
        self.config.mode = self.codec.SBC_MODE_STEREO
    elif config.channel_mode == SBCChannelMode.CHANNEL_MODE_DUAL:
        self.config.mode = self.codec.SBC_MODE_DUAL_CHANNEL
    elif config.channel_mode == SBCChannelMode.CHANNEL_MODE_JOINT_STEREO:
        self.config.mode = self.codec.SBC_MODE_JOINT_STEREO

    if config.frequency == SBCSamplingFrequency.FREQ_16KHZ:
        self.config.frequency = self.codec.SBC_FREQ_16000
    elif config.frequency == SBCSamplingFrequency.FREQ_32KHZ:
        self.config.frequency = self.codec.SBC_FREQ_32000
    elif config.frequency == SBCSamplingFrequency.FREQ_44_1KHZ:
        self.config.frequency = self.codec.SBC_FREQ_44100
    elif config.frequency == SBCSamplingFrequency.FREQ_48KHZ:
        self.config.frequency = self.codec.SBC_FREQ_48000

    if config.allocation_method == SBCAllocationMethod.LOUDNESS:
        self.config.allocation = self.codec.SBC_AM_LOUDNESS
    elif config.allocation_method == SBCAllocationMethod.SNR:
        self.config.allocation = self.codec.SBC_AM_SNR

    if config.subbands == SBCSubbands.SUBBANDS_4:
        self.config.subbands = self.codec.SBC_SB_4
    elif config.subbands == SBCSubbands.SUBBANDS_8:
        self.config.subbands = self.codec.SBC_SB_8

    if config.block_length == SBCBlocks.BLOCKS_4:
        self.config.blocks = self.codec.SBC_BLK_4
    elif config.block_length == SBCBlocks.BLOCKS_8:
        self.config.blocks = self.codec.SBC_BLK_8
    elif config.block_length == SBCBlocks.BLOCKS_12:
        self.config.blocks = self.codec.SBC_BLK_12
    elif config.block_length == SBCBlocks.BLOCKS_16:
        self.config.blocks = self.codec.SBC_BLK_16

    self.config.bitpool = config.max_bitpool
    self.config.endian = self.codec.SBC_LE
def last_or_default(self, default, predicate=None):
    '''The last element (optionally satisfying a predicate) or a default.

    If the predicate is omitted or is None this query returns the last
    element in the sequence; otherwise, it returns the last element in
    the sequence for which the predicate evaluates to True. If there is
    no such element the value of the default argument is returned.

    Note: This method uses immediate execution.

    Args:
        default: The value which will be returned if either the sequence
            is empty or there are no elements matching the predicate.

        predicate: An optional unary predicate function, the only argument
            to which is the element. The return value should be True for
            matching elements, otherwise False. If the predicate is
            omitted or None the last element of the source sequence will
            be returned.

    Returns:
        The last element of the sequence if predicate is None, otherwise
        the last element for which the predicate returns True. If there
        is no such element, the default argument is returned.

    Raises:
        ValueError: If the Queryable is closed.
        TypeError: If the predicate is not callable.
    '''
    if self.closed():
        raise ValueError("Attempt to call last_or_default() on a "
                         "closed Queryable.")

    return self._last_or_default(default) if predicate is None \
        else self._last_or_default_predicate(default, predicate)
def lein(word, max_length=4, zero_pad=True):
    """Return the Lein code for a word.

    This is a wrapper for :py:meth:`Lein.encode`.

    Parameters
    ----------
    word : str
        The word to transform
    max_length : int
        The length of the code returned (defaults to 4)
    zero_pad : bool
        Pad the end of the return value with 0s to achieve a max_length
        string

    Returns
    -------
    str
        The Lein code

    Examples
    --------
    >>> lein('Christopher')
    'C351'
    >>> lein('Niall')
    'N300'
    >>> lein('Smith')
    'S210'
    >>> lein('Schmidt')
    'S521'
    """
    return Lein().encode(word, max_length, zero_pad)
def image_alias_delete(image,
                       alias,
                       remote_addr=None,
                       cert=None,
                       key=None,
                       verify_cert=True):
    '''
    Delete an alias (this is currently not restricted to the image)

    image :
        An image alias, a fingerprint or an image object

    alias :
        The alias to delete

    remote_addr :
        A URL to a remote server, you also have to give cert and key if
        you provide remote_addr and it's a TCP address!

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM Formatted SSL Certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM Formatted SSL Key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Whether to verify the cert, this is by default True but in most
        cases you want to set it off as LXD normally uses self-signed
        certificates.

    CLI Examples:

    .. code-block:: bash

        $ salt '*' lxd.image_alias_delete xenial/amd64 x
    '''
    image = _verify_image(image, remote_addr, cert, key, verify_cert)

    try:
        image.delete_alias(alias)
    except pylxd.exceptions.LXDAPIException:
        return False
    return True
def move_entry(self, entry=None, group=None):
    """Move an entry to another group.

    A v1Group group and a v1Entry entry are needed.
    """
    if entry is None or group is None or type(entry) is not v1Entry or \
            type(group) is not v1Group:
        raise KPError("Need an entry and a group.")
    elif entry not in self.entries:
        raise KPError("No entry found.")
    elif group in self.groups:
        entry.group.entries.remove(entry)
        group.entries.append(entry)
        entry.group_id = group.id_
        entry.group = group
        return True
    else:
        raise KPError("No group found.")
def style_node(self, additional_style_attrib=None):
    """
    generate a style node (for automatic-styles)

    could specify additional attributes such as
    'style:parent-style-name' or 'style:list-style-name'
    """
    style_attrib = {"style:name": self.name,
                    "style:family": self.FAMILY}
    if additional_style_attrib:
        style_attrib.update(additional_style_attrib)
    if self.PARENT_STYLE_DICT:
        style_attrib.update(self.PARENT_STYLE_DICT)

    node = el("style:style", attrib=style_attrib)
    props = sub_el(node, self.STYLE_PROP, attrib=self.styles)

    return node
def TryLink(self, text, extension):
    """Compiles the program given in text to an executable env.Program,
    using extension as file extension (e.g. '.c').  Returns 1, if
    compilation was successful, 0 otherwise.  The target is saved in
    self.lastTarget (for further processing).
    """
    return self.TryBuild(self.env.Program, text, extension)
def simplified(self):
    """A simplified representation of the same transformation.
    """
    if self._simplified is None:
        self._simplified = SimplifiedChainTransform(self)
    return self._simplified
def wnfild(small, window):
    """
    Fill small gaps between adjacent intervals of a double precision window.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wnfild_c.html

    :param small: Limiting measure of small gaps.
    :type small: float
    :param window: Window to be filled
    :type window: spiceypy.utils.support_types.SpiceCell
    :return: Filled Window.
    :rtype: spiceypy.utils.support_types.SpiceCell
    """
    assert isinstance(window, stypes.SpiceCell)
    assert window.dtype == 1
    small = ctypes.c_double(small)
    libspice.wnfild_c(small, ctypes.byref(window))
    return window
def pad_light(self, values):
    """Accept an array of up to 4 values, and return an array of 4 values.

    If the input array is less than length 4, pad it with zeroes until it
    is length 4. Also ensure each value is a float.
    """
    while len(values) < 4:
        values.append(0.)
    return list(map(float, values))
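A quick stand-alone check of the padding (self is dropped, as nothing else on the instance is used):

def pad_light(values):
    # Note: pads the input list in place before converting to floats.
    while len(values) < 4:
        values.append(0.)
    return list(map(float, values))

print(pad_light([1, 2]))        # [1.0, 2.0, 0.0, 0.0]
print(pad_light([1, 2, 3, 4]))  # [1.0, 2.0, 3.0, 4.0]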
def vq_loss(x,
            targets,
            codebook_size,
            beta=0.25,
            decay=0.999,
            epsilon=1e-5,
            soft_em=False,
            num_samples=10,
            temperature=None,
            do_update=True):
    """Compute the loss of large vocab tensors using a VQAE codebook.

    Args:
      x: Tensor of inputs to be quantized to nearest code
      targets: Tensor of target indices to target codes
      codebook_size: Size of quantization codebook
      beta: scalar float for moving averages
      decay: scalar float for moving averages
      epsilon: scalar float for moving averages
      soft_em: boolean, whether to apply a soft sampling procedure
      num_samples: if soft_em, number of samples to take
      temperature: temperature if we want to sample nearest neighbors or None
      do_update: whether to update the means; True by default, can be a Tensor

    Returns:
      discrete_x: one-hot Tensor indicating which codebook element is
        closest to x
      x_means: Tensor, on the forward pass: closest codebook element to x,
        on the backwards pass: soft convex-combination of codebook elements
        by proximity to x
      target_means: the codebook elements corresponding to the targets
      code_loss: loss driving x closer to its nearest codebook element
      targets_loss: cross-entropy loss driving x closer to code
        corresponding to target
    """
    x_shape = common_layers.shape_list(x)
    target_shape = common_layers.shape_list(targets)
    hidden_size = x_shape[-1]
    means, _, _ = get_vq_codebook(codebook_size, hidden_size)
    x = tf.reshape(x, [-1, hidden_size])
    targets = tf.reshape(targets, [-1])
    one_hot_targets = tf.one_hot(targets, codebook_size)
    target_means = tf.matmul(one_hot_targets, means)

    discrete_x, code_loss, distances = vq_body(
        x,
        codebook_size,
        beta=beta,
        decay=decay,
        epsilon=epsilon,
        soft_em=soft_em,
        num_samples=num_samples,
        temperature=temperature,
        do_update=do_update)

    logits = -distances
    targets_loss = tf.losses.sparse_softmax_cross_entropy(
        logits=logits, labels=targets)
    targets_loss = tf.reduce_mean(targets_loss)

    x_means = tf.matmul(discrete_x, means)
    x_means = x + tf.stop_gradient(x_means - x)

    discrete_x = tf.reshape(discrete_x, x_shape[:-1] + [codebook_size])
    target_means = tf.reshape(target_means, target_shape + [hidden_size])
    return discrete_x, x_means, target_means, code_loss, targets_loss
def get_form(self, request, obj=None, **kwargs):
    """
    Returns a Form class for use in the admin add view. This is used by
    add_view and change_view.
    """
    parent_id = request.REQUEST.get('parent_id', None)
    if parent_id:
        return FolderForm
    else:
        folder_form = super(FolderAdmin, self).get_form(
            request, obj=None, **kwargs)

        def folder_form_clean(form_obj):
            cleaned_data = form_obj.cleaned_data
            folders_with_same_name = Folder.objects.filter(
                parent=form_obj.instance.parent,
                name=cleaned_data['name'])
            if form_obj.instance.pk:
                folders_with_same_name = folders_with_same_name.exclude(
                    pk=form_obj.instance.pk)
            if folders_with_same_name.exists():
                raise ValidationError(
                    'Folder with this name already exists.')
            return cleaned_data

        # attach clean to the default form rather than defining a new form
        # class
        folder_form.clean = folder_form_clean
        return folder_form
def _parse_snapshot_share(response, name):
    '''
    Extracts snapshot return header.
    '''
    snapshot = response.headers.get('x-ms-snapshot')
    return _parse_share(response, name, snapshot)
def pprint(sequence_file, annotation=None, annotation_file=None,
           block_length=10, blocks_per_line=6):
    """
    Pretty-print sequence(s) from a file.
    """
    annotations = []
    if annotation:
        annotations.append([(first - 1, last) for first, last in annotation])

    try:
        # Peek to see if this looks like a FASTA file.
        line = next(sequence_file)
        if line.startswith('>'):
            _pprint_fasta(itertools.chain([line], sequence_file),
                          annotations=annotations,
                          annotation_file=annotation_file,
                          block_length=block_length,
                          blocks_per_line=blocks_per_line)
        else:
            _pprint_line(line.strip(),
                         annotations=annotations,
                         annotation_file=annotation_file,
                         block_length=block_length,
                         blocks_per_line=blocks_per_line)
    except StopIteration:
        pass
def detect_language(index_page):
    """
    Detect `languages` using `langdetect` library.

    Args:
        index_page (str): HTML content of the page you wish to analyze.

    Returns:
        obj: One :class:`.SourceString` object.
    """
    dom = dhtmlparser.parseString(index_page)
    clean_content = dhtmlparser.removeTags(dom)

    lang = None
    try:
        lang = langdetect.detect(clean_content)
    except UnicodeDecodeError:
        lang = langdetect.detect(clean_content.decode("utf-8"))

    return SourceString(
        lang,
        source="langdetect"
    )
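A minimal check of the underlying langdetect call that the function wraps (requires the langdetect package; detection is probabilistic and unreliable for very short strings):

import langdetect

print(langdetect.detect("This page is written in plain English."))  # 'en'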
def copy(self, key=None):
    """
    Return a new collection with the same items as this one.

    If *key* is specified, create the new collection with the given
    Redis key.
    """
    other = self.__class__(
        redis=self.redis, key=key, writeback=self.writeback
    )
    other.extend(self)
    return other
def list_space_systems(self, page_size=None):
    """
    Lists the space systems visible to this client.

    Space systems are returned in lexicographical order.

    :rtype: :class:`.SpaceSystem` iterator
    """
    params = {}
    if page_size is not None:
        params['limit'] = page_size

    return pagination.Iterator(
        client=self._client,
        path='/mdb/{}/space-systems'.format(self._instance),
        params=params,
        response_class=mdb_pb2.ListSpaceSystemsResponse,
        items_key='spaceSystem',
        item_mapper=SpaceSystem,
    )
def mutate(self, node, index):
    """Modify the numeric value on `node`."""
    assert index < len(OFFSETS), 'received count with no associated offset'
    assert isinstance(node, parso.python.tree.Number)
    val = eval(node.value) + OFFSETS[index]  # pylint: disable=W0123
    return parso.python.tree.Number(' ' + str(val), node.start_pos)
def create_or_update_issue_remote_links(self, issue_key, link_url, title,
                                        global_id=None, relationship=None):
    """
    Add Remote Link to Issue, update url if global_id is passed

    :param issue_key: str
    :param link_url: str
    :param title: str
    :param global_id: str, OPTIONAL:
    :param relationship: str, OPTIONAL: Default by built-in method: 'Web Link'
    """
    url = 'rest/api/2/issue/{issue_key}/remotelink'.format(issue_key=issue_key)
    data = {'object': {'url': link_url, 'title': title}}
    if global_id:
        data['globalId'] = global_id
    if relationship:
        data['relationship'] = relationship
    return self.post(url, data=data)
def make_secure_stub(credentials, user_agent, stub_class, host, extra_options=()):
    """Makes a secure stub for an RPC service.

    Uses / depends on gRPC.

    :type credentials: :class:`google.auth.credentials.Credentials`
    :param credentials: The OAuth2 Credentials to use for creating
                        access tokens.

    :type user_agent: str
    :param user_agent: The user agent to be used with API requests.

    :type stub_class: type
    :param stub_class: A gRPC stub type for a given service.

    :type host: str
    :param host: The host for the service.

    :type extra_options: tuple
    :param extra_options: (Optional) Extra gRPC options passed when creating
                          the channel.

    :rtype: object, instance of ``stub_class``
    :returns: The stub object used to make gRPC requests to a given API.
    """
    channel = make_secure_channel(
        credentials, user_agent, host, extra_options=extra_options
    )
    return stub_class(channel)
def get_shell_history():
    """
    This only works with some shells.
    """
    # try for ipython
    if 'get_ipython' in globals():
        a = list(get_ipython().history_manager.input_hist_raw)
        a.reverse()
        return a

    elif 'SPYDER_SHELL_ID' in _os.environ:
        try:
            p = _os.path.join(_settings.path_user, ".spyder2", "history.py")
            a = read_lines(p)
            a.reverse()
            return a
        except:
            pass

    # otherwise try pyshell or pycrust (requires wx)
    else:
        try:
            import wx
            for x in wx.GetTopLevelWindows():
                if type(x) in [wx.py.shell.ShellFrame, wx.py.crust.CrustFrame]:
                    a = x.shell.GetText().split(">>>")
                    a.reverse()
                    return a
        except:
            pass

    return ['shell history not available']
def tag_secondary_structure(self, force=False):
    """Tags each `Monomer` in the `Assembly` with its secondary structure.

    Notes
    -----
    DSSP must be available to call. Check by running
    `isambard.external_programs.dssp.test_dssp`. If DSSP is not available,
    please follow the instructions here to add it:
    https://github.com/woolfson-group/isambard#external-programs

    For more information on DSSP see [1].

    References
    ----------
    .. [1] Kabsch W, Sander C (1983) "Dictionary of protein secondary
       structure: pattern recognition of hydrogen-bonded and geometrical
       features", Biopolymers, 22, 2577-637.

    Parameters
    ----------
    force : bool, optional
        If True the tag will be run even if `Monomers` are already tagged.
    """
    for polymer in self._molecules:
        if polymer.molecule_type == 'protein':
            polymer.tag_secondary_structure(force=force)
    return
def update_config(configclass: type(Config)):
    """Command line function to update and view a config."""

    # we build the real click command inside the function, because it needs
    # to be done dynamically, depending on the config.

    # we ignore the type errors, keeping the defaults if needed
    # everything will be updated anyway
    config = configclass()  # type: Config

    def print_list(ctx, param, value):
        # they do it like that in the doc
        # (http://click.pocoo.org/6/options/#callbacks-and-eager-options)
        # so I do the same... but I don't know why.
        # the only goal is to call __print_list__()
        if not value or ctx.resilient_parsing:
            return param
        config.__print_list__()
        ctx.exit()

    def show_conf(ctx, param, value):
        # see print_list
        if not value or ctx.resilient_parsing:
            return param
        config.__show__()
        ctx.exit()

    def reset(ctx, param, value):
        # see print_list
        if not value or ctx.resilient_parsing:
            return param

        click.confirm('Are you sure you want to reset ALL fields to the '
                      'defaults ? This action is not reversible.', abort=True)

        # So the file won't be opened and only the defaults will be loaded.
        configclass.__config_path__, config_path = '', configclass.__config_path__
        config = configclass()

        # To the right place again, so we can save the defaults
        configclass.__config_path__ = config_path
        config.__save__()
        ctx.exit()

    def clean(ctx, param, value):
        # see print_list
        if not value or ctx.resilient_parsing:
            return param
        config.__save__()
        click.echo('Cleaned !')
        ctx.exit()

    @click.command(context_settings={'ignore_unknown_options': True})
    @click.option('-c', '--clean', is_eager=True, is_flag=True,
                  expose_value=False, callback=clean,
                  help='Clean the file where the configuration is stored.')
    @click.option('-l', '--list', is_eager=True, is_flag=True,
                  expose_value=False, callback=print_list,
                  help='List the available configuration fields.')
    @click.option('--reset', is_flag=True, is_eager=True,
                  expose_value=False, callback=reset,
                  help='Reset all the fields to their default value.')
    @click.option('-s', '--show', is_eager=True, is_flag=True,
                  expose_value=False, callback=show_conf,
                  help='View the configuration.')
    @click.argument('fields-to-set', nargs=-1, type=click.UNPROCESSED)
    def command(fields_to_set: 'Tuple[str]'):
        """
        I manage your configuration.

        If you call me with no argument, you will be able to set each
        field in an interactive prompt. I can show your configuration
        with -s, list the available fields with -l and set them by
        --name-of-field=whatever.
        """

        # with a context manager, the config is always saved at the end
        with config:
            if len(fields_to_set) == 1 and '=' not in fields_to_set[0]:
                # we want to update a part of the config
                sub = fields_to_set[0]
                if sub in config:
                    if isinstance(config[sub], SubConfig):
                        # the part is a subconfig
                        prompt_update_all(config[sub])
                    else:
                        # TODO: dynamic prompt for one field
                        raise click.BadParameter(
                            '%s is not a SubConfig of the configuration' % sub)
                else:
                    raise click.BadParameter(
                        '%s is not a field of the configuration' % sub)

            elif fields_to_set:
                dct = {}
                for field in fields_to_set:
                    field, _, value = field.partition('=')
                    dct[field] = value
                # save directly what is passed if something was passed
                # without the interactive prompt
                config.__update__(dct)
            else:
                # or update all
                prompt_update_all(config)

    # this is the real function for the CLI
    LOGGER.debug('start command')
    command()
    LOGGER.debug('end command')
def list_all(dev: Device):
    """List all available API calls."""
    for name, service in dev.services.items():
        click.echo(click.style("\nService %s" % name, bold=True))
        for method in service.methods:
            click.echo("  %s" % method.name)
def generate_labels_from_classifications(classifications, timestamps):
    """
    This is to generate continuous segments out of classified small windows

    :param classifications:
    :param timestamps:
    :return:
    """
    window_length = timestamps[1] - timestamps[0]
    combo_list = [(classifications[k], timestamps[k])
                  for k in range(0, len(classifications))]
    labels = []
    for k, g in itertools.groupby(combo_list, lambda x: x[0]):
        items = list(g)
        start_time = items[0][1]
        end_time = items[-1][1] + window_length
        label_class = items[0][0]
        labels.append(AudacityLabel(start_time, end_time, label_class))
    return labels
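A self-contained sketch of the same groupby segmentation, with plain (start, end, label) tuples standing in for the AudacityLabel class that is not shown in the source:

import itertools

def segments(classifications, timestamps):
    window_length = timestamps[1] - timestamps[0]
    combo = list(zip(classifications, timestamps))
    out = []
    # Consecutive windows with the same class collapse into one segment.
    for label, group in itertools.groupby(combo, key=lambda x: x[0]):
        items = list(group)
        out.append((items[0][1], items[-1][1] + window_length, label))
    return out

print(segments(['a', 'a', 'b', 'b', 'a'], [0.0, 0.5, 1.0, 1.5, 2.0]))
# [(0.0, 1.0, 'a'), (1.0, 2.0, 'b'), (2.0, 2.5, 'a')]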
def get_client(self, initial_timeout=0.1, next_timeout=30):
    """
    Wait until a client instance is available

    :param float initial_timeout:
        how long to wait initially for an existing client to complete
    :param float next_timeout:
        if the pool could not obtain a client during the initial timeout,
        and we have allocated the maximum available number of clients,
        wait this long until we can retrieve another one

    :return: A connection object
    """
    try:
        return self._test_client(self._q.get(True, initial_timeout))
    except Empty:
        try:
            self._lock.acquire()
            if self._clients_in_use >= self._max_clients:
                raise _ClientUnavailableError("Too many clients in use")
            return self._test_client(self._make_client())
        except NetworkError:
            if not self._tolerate_error:
                raise
        except _ClientUnavailableError as e:
            try:
                return self._test_client(self._q.get(True, next_timeout))
            except Empty:
                raise e
        finally:
            self._lock.release()
def one_way(data, n):
    """
    One-way chi-square test of independence.

    Takes a 1D array as input and compares activation at each voxel to
    proportion expected under a uniform distribution throughout the array.
    Note that if you're testing activation with this, make sure that only
    valid voxels (e.g., in-mask gray matter voxels) are included in the
    array, or results won't make any sense!
    """
    term = data.astype('float64')
    no_term = n - term
    t_exp = np.mean(term, 0)
    t_exp = np.array([t_exp, ] * data.shape[0])
    nt_exp = n - t_exp
    t_mss = (term - t_exp) ** 2 / t_exp
    nt_mss = (no_term - nt_exp) ** 2 / nt_exp
    chi2 = t_mss + nt_mss
    return special.chdtrc(1, chi2)
One-way chi-square test of independence. Takes a 1D array as input and compares activation at each voxel to proportion expected under a uniform distribution throughout the array. Note that if you're testing activation with this, make sure that only valid voxels (e.g., in-mask gray matter voxels) are included in the array, or results won't make any sense!
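A hedged worked example; `special.chdtrc` is SciPy's chi-square survival function, so values near 1 mean the voxel is close to the uniform expectation:

import numpy as np
from scipy import special

data = np.array([20., 10., 30.])  # observed activation counts per voxel
n = 50                            # total observations per voxel
p_values = one_way(data, n)       # df = 1; data[0] equals the mean, so p_values[0] == 1.0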
def _visit_for(self, cls, node, parent): """visit a For node by returning a fresh instance of it""" newnode = cls(node.lineno, node.col_offset, parent) type_annotation = self.check_type_comment(node) newnode.postinit( target=self.visit(node.target, newnode), iter=self.visit(node.iter, newnode), body=[self.visit(child, newnode) for child in node.body], orelse=[self.visit(child, newnode) for child in node.orelse], type_annotation=type_annotation, ) return newnode
visit a For node by returning a fresh instance of it
def multi_constructor_pkl(loader, tag_suffix, node): """ Constructor function passed to PyYAML telling it how to load objects from paths to .pkl files. See PyYAML documentation for details on the call signature. """ mapping = loader.construct_yaml_str(node) if tag_suffix != "" and tag_suffix != u"": raise AssertionError('Expected tag_suffix to be "" but it is "'+tag_suffix+'"') rval = ObjectProxy(None, {}, yaml.serialize(node)) rval.instance = serial.load(mapping) return rval
Constructor function passed to PyYAML telling it how to load objects from paths to .pkl files. See PyYAML documentation for details on the call signature.
def run_inference(examples, serving_bundle): """Run inference on examples given model information Args: examples: A list of examples that matches the model spec. serving_bundle: A `ServingBundle` object that contains the information to make the inference request. Returns: A ClassificationResponse or RegressionResponse proto. """ batch_size = 64 if serving_bundle.estimator and serving_bundle.feature_spec: # If provided an estimator and feature spec then run inference locally. preds = serving_bundle.estimator.predict( lambda: tf.data.Dataset.from_tensor_slices( tf.parse_example([ex.SerializeToString() for ex in examples], serving_bundle.feature_spec)).batch(batch_size)) if serving_bundle.use_predict: preds_key = serving_bundle.predict_output_tensor elif serving_bundle.model_type == 'regression': preds_key = 'predictions' else: preds_key = 'probabilities' values = [] for pred in preds: values.append(pred[preds_key]) return common_utils.convert_prediction_values(values, serving_bundle) elif serving_bundle.custom_predict_fn: # If custom_predict_fn is provided, pass examples directly for local # inference. values = serving_bundle.custom_predict_fn(examples) return common_utils.convert_prediction_values(values, serving_bundle) else: return platform_utils.call_servo(examples, serving_bundle)
Run inference on examples given model information Args: examples: A list of examples that matches the model spec. serving_bundle: A `ServingBundle` object that contains the information to make the inference request. Returns: A ClassificationResponse or RegressionResponse proto.
def K_separator_demister_York(P, horizontal=False): r'''Calculates the Souders-Brown `K` factor as used in determining maximum permissible gas velocity in a two-phase separator in either a horizontal or vertical orientation, *with a demister*. This function is a curve fit to [1]_ published in [2]_ and is widely used. For 1 < P < 15 psia: .. math:: K = 0.1821 + 0.0029P + 0.0460\ln P For 15 <= P < 40 psia: .. math:: K = 0.35 For 40 <= P <= 5500 psia: .. math:: K = 0.430 - 0.023\ln P In the above equations, P is in units of psia. Parameters ---------- P : float Pressure of separator, [Pa] horizontal : bool, optional Whether to use the vertical or horizontal value; horizontal is 1.25 times higher, [-] Returns ------- K : float Souders-Brown horizontal or vertical `K` factor for two-phase separator design with a demister, [m/s] Notes ----- If the input pressure is under 1 psia, 1 psia is used. If the input pressure is over 5500 psia, 5500 psia is used. Examples -------- >>> K_separator_demister_York(975*psi) 0.08281536035331669 References ---------- .. [1] Otto H. York Company, "Mist Elimination in Gas Treatment Plants and Refineries," Engineering, Parsippany, NJ. .. [2] Svrcek, W. Y., and W. D. Monnery. "Design Two-Phase Separators within the Right Limits" Chemical Engineering Progress, (October 1, 1993): 53-60. ''' P = P/psi # Correlation in terms of psia if P < 15: if P < 1: P = 1 # Prevent negative K values, but as a consequence be # optimistic for K values; limit is 0.185 ft/s but real values # should probably be lower K = 0.1821 + 0.0029*P + 0.0460*log(P) elif P < 40: K = 0.35 else: if P > 5500: P = 5500 # Do not allow for lower K values above 5500 psia, as # the limit is stated to be 5500 K = 0.430 - 0.023*log(P) K *= foot # Converts units of ft/s to m/s; the graph and all fits are in ft/s if horizontal: # Watkins recommends a factor of 1.25 for horizontal separators over # vertical separators as well K *= 1.25 return K
r'''Calculates the Souders-Brown `K` factor as used in determining maximum permissible gas velocity in a two-phase separator in either a horizontal or vertical orientation, *with a demister*. This function is a curve fit to [1]_ published in [2]_ and is widely used. For 1 < P < 15 psia: .. math:: K = 0.1821 + 0.0029P + 0.0460\ln P For 15 <= P < 40 psia: .. math:: K = 0.35 For 40 <= P <= 5500 psia: .. math:: K = 0.430 - 0.023\ln P In the above equations, P is in units of psia. Parameters ---------- P : float Pressure of separator, [Pa] horizontal : bool, optional Whether to use the vertical or horizontal value; horizontal is 1.25 times higher, [-] Returns ------- K : float Souders-Brown horizontal or vertical `K` factor for two-phase separator design with a demister, [m/s] Notes ----- If the input pressure is under 1 psia, 1 psia is used. If the input pressure is over 5500 psia, 5500 psia is used. Examples -------- >>> K_separator_demister_York(975*psi) 0.08281536035331669 References ---------- .. [1] Otto H. York Company, "Mist Elimination in Gas Treatment Plants and Refineries," Engineering, Parsippany, NJ. .. [2] Svrcek, W. Y., and W. D. Monnery. "Design Two-Phase Separators within the Right Limits" Chemical Engineering Progress, (October 1, 1993): 53-60.
def decree(cls, path, concrete_start='', **kwargs): """ Constructor for Decree binary analysis. :param str path: Path to binary to analyze :param str concrete_start: Concrete stdin to use before symbolic input :param kwargs: Forwarded to the Manticore constructor :return: Manticore instance, initialized with a Decree State :rtype: Manticore """ try: return cls(_make_decree(path, concrete_start), **kwargs) except KeyError: # FIXME(mark) magic parsing for DECREE should raise better error raise Exception(f'Invalid binary: {path}')
Constructor for Decree binary analysis. :param str path: Path to binary to analyze :param str concrete_start: Concrete stdin to use before symbolic input :param kwargs: Forwarded to the Manticore constructor :return: Manticore instance, initialized with a Decree State :rtype: Manticore
def put_stream(self, rel_path, metadata=None, cb=None): '''Return a Flo object that can be written to, to send data to S3. This will result in a multi-part upload, possibly with each part being sent in its own thread. ''' import Queue import time import threading md5 = metadata.get('md5', None) if metadata else None # Horrible, but doing it anyway because I can. acl = ('public-read' if metadata.get('public', False) else metadata.get('acl', 'public-read')) if metadata else 'public-read' path = self._prefix(self._rename(rel_path)) class ThreadUploader(threading.Thread): """Thread class for uploading a part to S3""" def __init__(self, n, queue): threading.Thread.__init__(self) self.n = n self.queue = queue def run(self): while True: mp, part_number, buf = self.queue.get() if mp is None: # Signal to die logger.debug( "put_stream: Thread {} exiting".format( self.n)) self.queue.task_done() return logger.debug( "put_stream: Thread {}: processing part: {}".format( self.n, part_number)) t1 = time.time() try: mp.upload_part_from_file(buf, part_number) finally: self.queue.task_done() t2 = time.time() logger.debug("put_stream: Thread {}, part {}. time = {} rate = {} b/s" .format( self.n, part_number, round(t2 - t1, 3), round((float(buf.tell()) / (t2 - t1)), 2))) if metadata is None: metadata = {} if md5: metadata['md5'] = md5 # Multipart uploads don't use md5 for etag # Some libraries, boto apparently included, have trouble properly # calculating the hash when metadata values are null, empty, 0, etc. # https://forums.aws.amazon.com/thread.jspa?threadID=117580 for k, v in metadata.items(): if not v: del metadata[k] this = self buffer_size = 50 * 1024 * 1024 # Min part size is 5MB num_threads = 4 thread_upload_queue = Queue.Queue(maxsize=100) for i in range(num_threads): t = ThreadUploader(i, thread_upload_queue) t.setDaemon(True) t.start() class flo: '''Object that is returned to the caller, for the caller to issue write() or writelines() calls on ''' def __init__(self, rel_path): import io self.mp = this.bucket.initiate_multipart_upload( path, metadata=metadata) self.part_number = 1 self.buffer = io.BytesIO() self.total_size = 0 self.rel_path = rel_path def _send_buffer(self): '''Schedules a buffer to be sent in a thread by queuing it''' logger.debug( "_send_buffer: sending part {} to thread pool size: {}, total_size = {}" .format( self.part_number, self.buffer.tell(), self.total_size)) self.buffer.seek(0) thread_upload_queue.put( (self.mp, self.part_number, self.buffer)) def write(self, d): import io self.buffer.write(d) # Append the requested data to the buffer self.total_size += len(d) # After the buffer is large enough, send it, then create a new # buffer. if self.buffer.tell() > buffer_size: self._send_buffer() self.part_number += 1 self.buffer = io.BytesIO() def writelines(self, lines): raise NotImplementedError() def close(self): if self.buffer.tell() > 0: self._send_buffer() # Wait for all of the uploads to complete thread_upload_queue.join() for i in range(num_threads): thread_upload_queue.put( (None, None, None)) # Tell all of the threads to die # Wait for all of the threads to exit thread_upload_queue.join() # Multi-part uploads throw a 400 Bad Request if they are # completed without writing any data. if self.total_size > 0: self.mp.complete_upload() this.bucket.set_acl(acl, path) this.put_metadata(self.rel_path, metadata) def __enter__(self): return self def __exit__(self, type_, value, traceback): if type_: return False self.close() return flo(rel_path)
Return a Flo object that can be written to, to send data to S3. This will result in a multi-part upload, possibly with each part being sent in its own thread.
def get_next(self): """Return the next node for this walk of the tree. This function is intentionally iterative, not recursive, to sidestep any issues of stack size limitations. """ while self.stack: if self.stack[-1].wkids: node = self.stack[-1].wkids.pop(0) if not self.stack[-1].wkids: self.stack[-1].wkids = None if node in self.history: self.cycle_func(node, self.stack) else: node.wkids = copy.copy(self.kids_func(node, self.stack[-1])) self.stack.append(node) self.history[node] = None else: node = self.stack.pop() del self.history[node] if node: if self.stack: parent = self.stack[-1] else: parent = None self.eval_func(node, parent) return node return None
Return the next node for this walk of the tree. This function is intentionally iterative, not recursive, to sidestep any issues of stack size limitations.
def get_matrix_index(graph: BELGraph) -> Set[str]: """Return set of HGNC names from Proteins/Rnas/Genes/miRNA, nodes that can be used by SPIA.""" # TODO: Using HGNC Symbols for now return { node.name for node in graph if isinstance(node, CentralDogma) and node.namespace.upper() == 'HGNC' }
Return set of HGNC names from Proteins/Rnas/Genes/miRNA, nodes that can be used by SPIA.
def update_commands(self, commands_str): """ update with commands from the block """ commands = dict(parse_qsl(commands_str, keep_blank_values=True)) _if = commands.get("if", self._if) if _if: self._if = Condition(_if) self._set_int(commands, "max_length") self._set_int(commands, "min_length") self.color = self._check_color(commands.get("color")) self.not_zero = "not_zero" in commands or self.not_zero self.show = "show" in commands or self.show self.soft = "soft" in commands or self.soft
update with commands from the block
def check_trytes_codec(encoding): """ Determines which codec to use for the specified encoding. References: - https://docs.python.org/3/library/codecs.html#codecs.register """ if encoding == AsciiTrytesCodec.name: return AsciiTrytesCodec.get_codec_info() elif encoding == AsciiTrytesCodec.compat_name: warn( '"{old_codec}" codec will be removed in PyOTA v2.1. ' 'Use "{new_codec}" instead.'.format( new_codec=AsciiTrytesCodec.name, old_codec=AsciiTrytesCodec.compat_name, ), DeprecationWarning, ) return AsciiTrytesCodec.get_codec_info() return None
Determines which codec to use for the specified encoding. References: - https://docs.python.org/3/library/codecs.html#codecs.register
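A sketch of how a search function like this plugs into the stdlib codec machinery; `codecs.register` takes a callable mapping an encoding name to a `CodecInfo` (or `None`). Treating `AsciiTrytesCodec.name` as `'trytes'` here is an assumption:

import codecs

codecs.register(check_trytes_codec)

encoded = codecs.encode(b'Hello!', 'trytes')  # bytes -> tryte string
decoded = codecs.decode(encoded, 'trytes')    # round-trips back to b'Hello!'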
def merge_intervals(intervals): """ Merge overlapping [start, end] intervals in a list. Note: sorts and consumes the input list in place. """ if intervals is None: return None intervals.sort(key=lambda i: i[0]) out = [intervals.pop(0)] for i in intervals: if out[-1][-1] >= i[0]: out[-1][-1] = max(out[-1][-1], i[-1]) else: out.append(i) return out
Merge overlapping [start, end] intervals in a list. Note: sorts and consumes the input list in place.
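A quick usage sketch:

merge_intervals([[8, 10], [1, 3], [2, 6], [15, 18]])
# -> [[1, 6], [8, 10], [15, 18]]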
def serialize_to_xml_str(obj_pyxb, pretty=True, strip_prolog=False, xslt_url=None): """Serialize PyXB object to pretty printed XML ``str`` for display. Args: obj_pyxb: PyXB object PyXB object to serialize. pretty: bool False: Disable pretty print formatting. XML will not have line breaks. strip_prolog: True: remove any XML prolog (e.g., ``<?xml version="1.0" encoding="utf-8"?>``), from the resulting XML doc. xslt_url: str If specified, add a processing instruction to the XML doc that specifies the download location for an XSLT stylesheet. Returns: str: Pretty printed XML document """ return serialize_gen(obj_pyxb, None, pretty, strip_prolog, xslt_url)
Serialize PyXB object to pretty printed XML ``str`` for display. Args: obj_pyxb: PyXB object PyXB object to serialize. pretty: bool False: Disable pretty print formatting. XML will not have line breaks. strip_prolog: True: remove any XML prolog (e.g., ``<?xml version="1.0" encoding="utf-8"?>``), from the resulting XML doc. xslt_url: str If specified, add a processing instruction to the XML doc that specifies the download location for an XSLT stylesheet. Returns: str: Pretty printed XML document
def get_optimized_molecule(self): """Return a molecule object of the optimal geometry""" opt_coor = self.get_optimization_coordinates() if len(opt_coor) == 0: return None else: return Molecule( self.molecule.numbers, opt_coor[-1], )
Return a molecule object of the optimal geometry
def sample_from_distribution(self, distribution, k, proportions=False): """Return a new table with the same number of rows and a new column. The values in the distribution column define a multinomial. They are replaced by sample counts/proportions in the output. >>> sizes = Table(['size', 'count']).with_rows([ ... ['small', 50], ... ['medium', 100], ... ['big', 50], ... ]) >>> sizes.sample_from_distribution('count', 1000) # doctest: +SKIP size | count | count sample small | 50 | 239 medium | 100 | 496 big | 50 | 265 >>> sizes.sample_from_distribution('count', 1000, True) # doctest: +SKIP size | count | count sample small | 50 | 0.24 medium | 100 | 0.51 big | 50 | 0.25 """ dist = self._get_column(distribution) total = sum(dist) assert total > 0 and np.all(dist >= 0), 'Counts or a distribution required' dist = dist/sum(dist) sample = np.random.multinomial(k, dist) if proportions: sample = sample / sum(sample) label = self._unused_label(self._as_label(distribution) + ' sample') return self.with_column(label, sample)
Return a new table with the same number of rows and a new column. The values in the distribution column define a multinomial. They are replaced by sample counts/proportions in the output. >>> sizes = Table(['size', 'count']).with_rows([ ... ['small', 50], ... ['medium', 100], ... ['big', 50], ... ]) >>> sizes.sample_from_distribution('count', 1000) # doctest: +SKIP size | count | count sample small | 50 | 239 medium | 100 | 496 big | 50 | 265 >>> sizes.sample_from_distribution('count', 1000, True) # doctest: +SKIP size | count | count sample small | 50 | 0.24 medium | 100 | 0.51 big | 50 | 0.25
def _drop_duplicate_ij(self): """ Drops duplicate entries from the network dataframe. """ self.network['ij'] = list(map(lambda x: tuple(sorted(x)), list( zip(*[self.network['i'].values, self.network['j'].values])))) self.network.drop_duplicates(['ij', 't'], inplace=True) self.network.reset_index(inplace=True, drop=True) self.network.drop('ij', inplace=True, axis=1)
Drops duplicate entries from the network dataframe.
def create_many(self, statements): """ Creates multiple statement entries. """ create_statements = [] for statement in statements: statement_data = statement.serialize() tag_data = list(set(statement_data.pop('tags', []))) statement_data['tags'] = tag_data if not statement.search_text: statement_data['search_text'] = self.tagger.get_bigram_pair_string(statement.text) if not statement.search_in_response_to and statement.in_response_to: statement_data['search_in_response_to'] = self.tagger.get_bigram_pair_string(statement.in_response_to) create_statements.append(statement_data) self.statements.insert_many(create_statements)
Creates multiple statement entries.
async def execute(self, query: str, *args, timeout: float=None) -> str: """Execute an SQL command (or commands). Pool performs this operation using one of its connections. Other than that, it behaves identically to :meth:`Connection.execute() <connection.Connection.execute>`. .. versionadded:: 0.10.0 """ async with self.acquire() as con: return await con.execute(query, *args, timeout=timeout)
Execute an SQL command (or commands). Pool performs this operation using one of its connections. Other than that, it behaves identically to :meth:`Connection.execute() <connection.Connection.execute>`. .. versionadded:: 0.10.0
def setBaudrate(self, baudrate): '''set baudrate''' from . import mavutil if self.baudrate == baudrate: return self.baudrate = baudrate self.mav.mav.serial_control_send(self.port, mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE, 0, self.baudrate, 0, [0]*70) self.flushInput() self.debug("Changed baudrate %u" % self.baudrate)
set baudrate
def PopEvent(self): """Pops an event from the heap. Returns: tuple: containing: str: identifier of the event MACB group or None if the event cannot be grouped. str: identifier of the event content. EventObject: event. """ try: macb_group_identifier, content_identifier, event = heapq.heappop( self._heap) if macb_group_identifier == '': macb_group_identifier = None return macb_group_identifier, content_identifier, event except IndexError: return None
Pops an event from the heap. Returns: tuple: containing: str: identifier of the event MACB group or None if the event cannot be grouped. str: identifier of the event content. EventObject: event.
def op( name, labels, predictions, num_thresholds=None, weights=None, display_name=None, description=None, collections=None): """Create a PR curve summary op for a single binary classifier. Computes true/false positive/negative values for the given `predictions` against the ground truth `labels`, against a list of evenly distributed threshold values in `[0, 1]` of length `num_thresholds`. Each number in `predictions`, a float in `[0, 1]`, is compared with its corresponding boolean label in `labels`, and counts as a single tp/fp/tn/fn value at each threshold. This is then multiplied with `weights` which can be used to reweight certain values, or, more commonly, to mask values. Args: name: A tag attached to the summary. Used by TensorBoard for organization. labels: The ground truth values. A Tensor of `bool` values with arbitrary shape. predictions: A float32 `Tensor` whose values are in the range `[0, 1]`. Dimensions must match those of `labels`. num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to compute PR metrics for. Should be `>= 2`. This value should be a constant integer value, not a Tensor that stores an integer. weights: Optional float32 `Tensor`. Individual counts are multiplied by this value. This tensor must be either the same shape as or broadcastable to the `labels` tensor. display_name: Optional name for this summary in TensorBoard, as a constant `str`. Defaults to `name`. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. Returns: A summary operation for use in a TensorFlow graph. The float32 tensor produced by the summary operation is of dimension (6, num_thresholds). The first dimension (of length 6) is of the order: true positives, false positives, true negatives, false negatives, precision, recall. """ # TODO(nickfelt): remove on-demand imports once dep situation is fixed. import tensorflow.compat.v1 as tf if num_thresholds is None: num_thresholds = _DEFAULT_NUM_THRESHOLDS if weights is None: weights = 1.0 dtype = predictions.dtype with tf.name_scope(name, values=[labels, predictions, weights]): tf.assert_type(labels, tf.bool) # We cast to float to ensure we have 0.0 or 1.0. f_labels = tf.cast(labels, dtype) # Ensure predictions are all in range [0.0, 1.0]. predictions = tf.minimum(1.0, tf.maximum(0.0, predictions)) # Get weighted true/false labels. true_labels = f_labels * weights false_labels = (1.0 - f_labels) * weights # Before we begin, flatten predictions. predictions = tf.reshape(predictions, [-1]) # Shape the labels so they are broadcast-able for later multiplication. true_labels = tf.reshape(true_labels, [-1, 1]) false_labels = tf.reshape(false_labels, [-1, 1]) # To compute TP/FP/TN/FN, we are measuring a binary classifier # C(t) = (predictions >= t) # at each threshold 't'. So we have # TP(t) = sum( C(t) * true_labels ) # FP(t) = sum( C(t) * false_labels ) # # But, computing C(t) requires computation for each t. To make it fast, # observe that C(t) is a cumulative integral, and so if we have # thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1} # where n = num_thresholds, and if we can compute the bucket function # B(i) = Sum( (predictions == t), t_i <= t < t{i+1} ) # then we get # C(t_i) = sum( B(j), j >= i ) # which is the reversed cumulative sum in tf.cumsum().
# # We can compute B(i) efficiently by taking advantage of the fact that # our thresholds are evenly distributed, in that # width = 1.0 / (num_thresholds - 1) # thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0] # Given a prediction value p, we can map it to its bucket by # bucket_index(p) = floor( p * (num_thresholds - 1) ) # so we can bucket the predictions with a one-hot encoding and a # reduce_sum over the example axis in one pass. # Compute the bucket indices for each prediction value. bucket_indices = tf.cast( tf.floor(predictions * (num_thresholds - 1)), tf.int32) # Bucket predictions. tp_buckets = tf.reduce_sum( input_tensor=tf.one_hot(bucket_indices, depth=num_thresholds) * true_labels, axis=0) fp_buckets = tf.reduce_sum( input_tensor=tf.one_hot(bucket_indices, depth=num_thresholds) * false_labels, axis=0) # Set up the cumulative sums to compute the actual metrics. tp = tf.cumsum(tp_buckets, reverse=True, name='tp') fp = tf.cumsum(fp_buckets, reverse=True, name='fp') # fn = sum(true_labels) - tp # = sum(tp_buckets) - tp # = tp[0] - tp # Similarly, # tn = fp[0] - fp tn = fp[0] - fp fn = tp[0] - tp precision = tp / tf.maximum(_MINIMUM_COUNT, tp + fp) recall = tp / tf.maximum(_MINIMUM_COUNT, tp + fn) return _create_tensor_summary( name, tp, fp, tn, fn, precision, recall, num_thresholds, display_name, description, collections)
Create a PR curve summary op for a single binary classifier. Computes true/false positive/negative values for the given `predictions` against the ground truth `labels`, against a list of evenly distributed threshold values in `[0, 1]` of length `num_thresholds`. Each number in `predictions`, a float in `[0, 1]`, is compared with its corresponding boolean label in `labels`, and counts as a single tp/fp/tn/fn value at each threshold. This is then multiplied with `weights` which can be used to reweight certain values, or, more commonly, to mask values. Args: name: A tag attached to the summary. Used by TensorBoard for organization. labels: The ground truth values. A Tensor of `bool` values with arbitrary shape. predictions: A float32 `Tensor` whose values are in the range `[0, 1]`. Dimensions must match those of `labels`. num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to compute PR metrics for. Should be `>= 2`. This value should be a constant integer value, not a Tensor that stores an integer. weights: Optional float32 `Tensor`. Individual counts are multiplied by this value. This tensor must be either the same shape as or broadcastable to the `labels` tensor. display_name: Optional name for this summary in TensorBoard, as a constant `str`. Defaults to `name`. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. Returns: A summary operation for use in a TensorFlow graph. The float32 tensor produced by the summary operation is of dimension (6, num_thresholds). The first dimension (of length 6) is of the order: true positives, false positives, true negatives, false negatives, precision, recall.
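The bucketing-plus-reversed-cumsum trick above is easy to sanity-check in plain NumPy; a minimal sketch with unweighted labels:

import numpy as np

num_thresholds = 5
predictions = np.array([0.1, 0.4, 0.45, 0.9])
labels = np.array([False, True, False, True])

bucket_indices = np.floor(predictions * (num_thresholds - 1)).astype(int)
one_hot = np.eye(num_thresholds)[bucket_indices]         # shape (4, num_thresholds)
tp_buckets = (one_hot * labels[:, None]).sum(axis=0)     # B(i) over true labels
fp_buckets = (one_hot * (~labels)[:, None]).sum(axis=0)  # B(i) over false labels

tp = np.cumsum(tp_buckets[::-1])[::-1]  # TP(t_i) = sum of buckets j >= i -> [2, 2, 1, 1, 0]
fp = np.cumsum(fp_buckets[::-1])[::-1]  # -> [2, 1, 0, 0, 0]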
def add_template(self, tpl): """ Add and index a template into the `templates` container. This implementation takes into account that a service has two naming attributes: `host_name` and `service_description`. :param tpl: The template to add :type tpl: :return: None """ objcls = self.inner_class.my_type name = getattr(tpl, 'name', '') sdesc = getattr(tpl, 'service_description', '') hname = getattr(tpl, 'host_name', '') logger.debug("Adding a %s template: host_name: %s, name: %s, service_description: %s", objcls, hname, name, sdesc) if not name and not hname: msg = "a %s template has been defined without name nor host_name. from: %s" \ % (objcls, tpl.imported_from) tpl.add_error(msg) elif not name and not sdesc: msg = "a %s template has been defined without name nor service_description. from: %s" \ % (objcls, tpl.imported_from) tpl.add_error(msg) elif not name: # If name is not defined, use the host_name_service_description as name (fix #791) setattr(tpl, 'name', "%s_%s" % (hname, sdesc)) tpl = self.index_template(tpl) elif name: tpl = self.index_template(tpl) self.templates[tpl.uuid] = tpl logger.debug('\tAdded service template #%d %s', len(self.templates), tpl)
Add and index a template into the `templates` container. This implementation takes into account that a service has two naming attributes: `host_name` and `service_description`. :param tpl: The template to add :type tpl: :return: None
def _send(self, data): """Send data to statsd.""" try: self._sock.sendto(data.encode('ascii'), self._addr) except (socket.error, RuntimeError): # No time for love, Dr. Jones! pass
Send data to statsd.
def display_name(self): """ Find the most appropriate display name for a user: look for a "display_name", then a "real_name", and finally fall back to the always-present "name". """ for k in self._NAME_KEYS: if self._raw.get(k): return self._raw[k] if "profile" in self._raw and self._raw["profile"].get(k): return self._raw["profile"][k] return self._raw["name"]
Find the most appropriate display name for a user: look for a "display_name", then a "real_name", and finally fall back to the always-present "name".
def hash(self): ''' :rtype: int :return: hash of the condition ''' hashed = super(Compare, self).hash() return khash(hashed, self._comp_value, self._comp_type)
:rtype: int :return: hash of the condition
def _get_bq_service(credentials=None, service_url=None): """Construct an authorized BigQuery service object.""" assert credentials, 'Must provide ServiceAccountCredentials' http = credentials.authorize(Http()) service = build( 'bigquery', 'v2', http=http, discoveryServiceUrl=service_url, cache_discovery=False ) return service
Construct an authorized BigQuery service object.
def list_(): ''' Return the list of frozen states. CLI Example: .. code-block:: bash salt '*' freezer.list ''' ret = [] states_path = _states_path() if not os.path.isdir(states_path): return ret for state in os.listdir(states_path): if state.endswith(('-pkgs.yml', '-reps.yml')): # Remove the suffix, as both share the same size ret.append(state[:-9]) return sorted(set(ret))
Return the list of frozen states. CLI Example: .. code-block:: bash salt '*' freezer.list
def numberOfXTilesAtZoom(self, zoom): "Returns the number of tiles over x at a given zoom level" [minRow, minCol, maxRow, maxCol] = self.getExtentAddress(zoom) return maxCol - minCol + 1
Returns the number of tiles over x at a given zoom level
def get_last_weeks(number_of_weeks): """Get the last weeks.""" time_now = datetime.now() year = time_now.isocalendar()[0] week = time_now.isocalendar()[1] weeks = [] for i in range(0, number_of_weeks): start = get_week_dates(year, week - i, as_timestamp=True)[0] n_year, n_week = get_year_week(start) weeks.append((n_year, n_week)) return weeks
Get the last weeks.
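A brief usage sketch; the result is a list of (ISO year, ISO week) pairs counting backwards from the current week (the values shown are hypothetical):

get_last_weeks(3)
# -> [(2024, 10), (2024, 9), (2024, 8)] if "now" fell in ISO week 10 of 2024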
def from_pubkey(cls, pubkey, compressed=True, version=56, prefix=None): """ Derive address using ``RIPEMD160(SHA512(x))`` """ # Ensure this is a public key pubkey = PublicKey(pubkey, prefix=prefix or Prefix.prefix) if compressed: pubkey_plain = pubkey.compressed() else: pubkey_plain = pubkey.uncompressed() addressbin = ripemd160(hashlib.sha512(unhexlify(pubkey_plain)).hexdigest()) result = Base58(hexlify(addressbin).decode("ascii")) return cls(result, prefix=pubkey.prefix)
Derive address using ``RIPEMD160(SHA512(x))``
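A standalone sketch of the digest pipeline, assuming (as in graphenebase) that the `ripemd160()` helper above unhexlifies its hex-string argument before hashing:

import hashlib
from binascii import hexlify, unhexlify

def address_digest(pubkey_hex):
    # SHA512 over the raw key bytes, then RIPEMD160 over the raw SHA512 digest.
    # Note: hashlib.new('ripemd160') requires OpenSSL support for RIPEMD160.
    sha_bytes = hashlib.sha512(unhexlify(pubkey_hex)).digest()
    return hexlify(hashlib.new('ripemd160', sha_bytes).digest()).decode('ascii')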
def save_model(model, output_file=None, output_dir=None, output_prefix='pymzn'): """Save a model to file. Parameters ---------- model : str The minizinc model (i.e. the content of a ``.mzn`` file). output_file : str The path to the output file. If this parameter is ``None`` (default), a temporary file is created with the given model in the specified output directory, using the specified prefix. output_dir : str The directory where to create the file in case ``output_file`` is None. Default is ``None``, which creates a file in the system temporary directory. output_prefix : str The prefix for the output file if created. Default is ``'pymzn'``. Returns ------- str The path to the newly created ``.mzn`` file. """ if output_file: mzn_file = output_file output_file = open(output_file, 'w+', buffering=1) else: output_prefix += '_' output_file = NamedTemporaryFile( dir=output_dir, prefix=output_prefix, suffix='.mzn', delete=False, mode='w+', buffering=1 ) mzn_file = output_file.name output_file.write(model) output_file.close() logger.info('Generated file {}'.format(mzn_file)) return mzn_file
Save a model to file. Parameters ---------- model : str The minizinc model (i.e. the content of a ``.mzn`` file). output_file : str The path to the output file. If this parameter is ``None`` (default), a temporary file is created with the given model in the specified output directory, using the specified prefix. output_dir : str The directory where to create the file in case ``output_file`` is None. Default is ``None``, which creates a file in the system temporary directory. output_prefix : str The prefix for the output file if created. Default is ``'pymzn'``. Returns ------- str The path to the newly created ``.mzn`` file.
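A short usage sketch:

model = 'var 1..10: x; constraint x > 5; solve satisfy;'

path = save_model(model, output_file='model.mzn')  # write to an explicit path
tmp_path = save_model(model)                       # or create a temporary pymzn_*.mzn file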
def get_point(self, *position): """Return the noise value of a specific position. Example usage: value = noise.getPoint(x, y, z) Args: position (Tuple[float, ...]): The point to sample at. Returns: float: The noise value at position. This will be a floating point in the 0.0-1.0 range. """ #array = self._array #for d, pos in enumerate(position): # array[d] = pos #array = self._cFloatArray(*position) array = _ffi.new(self._arrayType, position) if self._useOctaves: return (self._noiseFunc(self._noise, array, self._octaves) + 1) * 0.5 return (self._noiseFunc(self._noise, array) + 1) * 0.5
Return the noise value of a specific position. Example usage: value = noise.getPoint(x, y, z) Args: position (Tuple[float, ...]): The point to sample at. Returns: float: The noise value at position. This will be a floating point in the 0.0-1.0 range.
def set_boolean(self, option, value): """Set a boolean option. Args: option (str): name of option. value (bool): value of the option. Raises: TypeError: Value must be a boolean. """ if not isinstance(value, bool): raise TypeError("%s must be a boolean" % option) self.options[option] = str(value).lower()
Set a boolean option. Args: option (str): name of option. value (bool): value of the option. Raises: TypeError: Value must be a boolean.
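A short usage sketch (`opts` is a hypothetical instance) showing that the value is stored as a lowercase string, not a bool:

opts.set_boolean('interactive', True)
assert opts.options['interactive'] == 'true'

opts.set_boolean('interactive', 'yes')  # raises TypeError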
def plot(self, format='segments', bits=None, **kwargs): """Plot the data for this `StateVector` Parameters ---------- format : `str`, optional, default: ``'segments'`` The type of plot to make, either 'segments' to plot the SegmentList for each bit, or 'timeseries' to plot the raw data for this `StateVector` bits : `list`, optional A list of bit indices or bit names, defaults to `~StateVector.bits`. This argument is ignored if ``format`` is not ``'segments'`` **kwargs Other keyword arguments to be passed to either `~gwpy.plot.SegmentAxes.plot` or `~gwpy.plot.Axes.plot`, depending on ``format``. Returns ------- plot : `~gwpy.plot.Plot` output plot object See Also -------- matplotlib.pyplot.figure for documentation of keyword arguments used to create the figure matplotlib.figure.Figure.add_subplot for documentation of keyword arguments used to create the axes gwpy.plot.SegmentAxes.plot_flag for documentation of keyword arguments used in rendering each statevector flag. """ if format == 'timeseries': return super(StateVector, self).plot(**kwargs) if format == 'segments': from ..plot import Plot kwargs.setdefault('xscale', 'auto-gps') return Plot(*self.to_dqflags(bits=bits).values(), projection='segments', **kwargs) raise ValueError("'format' argument must be one of: 'timeseries' or " "'segments'")
Plot the data for this `StateVector` Parameters ---------- format : `str`, optional, default: ``'segments'`` The type of plot to make, either 'segments' to plot the SegmentList for each bit, or 'timeseries' to plot the raw data for this `StateVector` bits : `list`, optional A list of bit indices or bit names, defaults to `~StateVector.bits`. This argument is ignored if ``format`` is not ``'segments'`` **kwargs Other keyword arguments to be passed to either `~gwpy.plot.SegmentAxes.plot` or `~gwpy.plot.Axes.plot`, depending on ``format``. Returns ------- plot : `~gwpy.plot.Plot` output plot object See Also -------- matplotlib.pyplot.figure for documentation of keyword arguments used to create the figure matplotlib.figure.Figure.add_subplot for documentation of keyword arguments used to create the axes gwpy.plot.SegmentAxes.plot_flag for documentation of keyword arguments used in rendering each statevector flag.
def idxmax(self, axis=0, skipna=True, *args, **kwargs): """ Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. axis : int, default 0 For compatibility with DataFrame.idxmax. Redundant for application on Series. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan """ skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) i = nanops.nanargmax(com.values_from_object(self), skipna=skipna) if i == -1: return np.nan return self.index[i]
Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. axis : int, default 0 For compatibility with DataFrame.idxmax. Redundant for application on Series. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan
def from_df(cls, df, **kwargs): """ DataFrame must have the right columns. These are: name, band, resolution, mag, e_mag, separation, pa """ tree = cls(**kwargs) for (n,b), g in df.groupby(['name','band']): #g.sort('separation', inplace=True) #ensures that the first is reference sources = [Source(**s[['mag','e_mag','separation','pa','relative']]) for _,s in g.iterrows()] obs = Observation(n, b, g.resolution.mean(), sources=sources, relative=g.relative.any()) tree.add_observation(obs) # For all relative mags, set reference to be brightest return tree
DataFrame must have the right columns. These are: name, band, resolution, mag, e_mag, separation, pa
def to_json(self): """ :return: str """ json_dict = self.to_json_basic() json_dict['channel'] = self.channel json_dict['disable_inhibit_forced'] = self.disable_inhibit_forced json_dict['status'] = self.status json_dict['led_status'] = self.led_status json_dict['delay_time'] = self.delay_time return json.dumps(json_dict)
:return: str
def _doBottomUpCompute(self, rfInput, resetSignal): """ Do one iteration of inference and/or learning and return the result Parameters: -------------------------------------------- rfInput: Input vector. Shape is: (1, inputVectorLen). resetSignal: True if reset is asserted """ # Conditional compute break self._conditionalBreak() # Save the rfInput for the spInputNonZeros parameter self._spatialPoolerInput = rfInput.reshape(-1) assert(rfInput.shape[0] == 1) # Run inference using the spatial pooler. We learn on the coincidences only # if we are in learning mode and trainingStep is set appropriately. # Run SFDR bottom-up compute and cache output in self._spatialPoolerOutput inputVector = numpy.array(rfInput[0]).astype('uint32') outputVector = numpy.zeros(self._sfdr.getNumColumns()).astype('uint32') self._sfdr.compute(inputVector, self.learningMode, outputVector) self._spatialPoolerOutput[:] = outputVector[:] # Direct logging of SP outputs if requested if self._fpLogSP: output = self._spatialPoolerOutput.reshape(-1) outputNZ = output.nonzero()[0] outStr = " ".join(["%d" % int(token) for token in outputNZ]) print >>self._fpLogSP, output.size, outStr # Direct logging of SP inputs if self._fpLogSPInput: output = rfInput.reshape(-1) outputNZ = output.nonzero()[0] outStr = " ".join(["%d" % int(token) for token in outputNZ]) print >>self._fpLogSPInput, output.size, outStr return self._spatialPoolerOutput
Do one iteration of inference and/or learning and return the result Parameters: -------------------------------------------- rfInput: Input vector. Shape is: (1, inputVectorLen). resetSignal: True if reset is asserted
def get_value_from_handle(self, handle, key, handlerecord_json=None): ''' Retrieve a single value from a single Handle. If several entries with this key exist, the method returns the first one. If the handle does not exist, the method will raise a HandleNotFoundException. :param handle: The handle to take the value from. :param key: The key. :return: A string containing the value or None if the Handle record does not contain the key. :raises: :exc:`~b2handle.handleexceptions.HandleSyntaxError` :raises: :exc:`~b2handle.handleexceptions.HandleNotFoundException` ''' LOGGER.debug('get_value_from_handle...') handlerecord_json = self.__get_handle_record_if_necessary(handle, handlerecord_json) if handlerecord_json is None: raise HandleNotFoundException(handle=handle) list_of_entries = handlerecord_json['values'] indices = [] for i in xrange(len(list_of_entries)): if list_of_entries[i]['type'] == key: indices.append(i) if len(indices) == 0: return None else: if len(indices) > 1: LOGGER.debug('get_value_from_handle: The handle ' + handle + \ ' contains several entries of type "' + key + \ '". Only the first one is returned.') return list_of_entries[indices[0]]['data']['value']
Retrieve a single value from a single Handle. If several entries with this key exist, the method returns the first one. If the handle does not exist, the method will raise a HandleNotFoundException. :param handle: The handle to take the value from. :param key: The key. :return: A string containing the value or None if the Handle record does not contain the key. :raises: :exc:`~b2handle.handleexceptions.HandleSyntaxError` :raises: :exc:`~b2handle.handleexceptions.HandleNotFoundException`
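A hedged sketch of the record structure the method walks, with a hypothetical handle and payload:

handlerecord_json = {
    'values': [
        {'type': 'URL', 'data': {'value': 'https://example.org/object'}},
        {'type': 'CHECKSUM', 'data': {'value': 'abc123'}},
    ],
}

client.get_value_from_handle('21.T12345/foo', 'URL', handlerecord_json)
# -> 'https://example.org/object'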
def assign_valence(mol): """Assign pi electron and hydrogens""" for u, v, bond in mol.bonds_iter(): if bond.order == 2: mol.atom(u).pi = 1 mol.atom(v).pi = 1 if mol.atom(u).symbol == "O" and not mol.atom(u).charge: mol.atom(v).carbonyl_C = 1 if mol.atom(v).symbol == "O" and not mol.atom(v).charge: mol.atom(u).carbonyl_C = 1 elif bond.order == 3: mol.atom(u).pi = mol.atom(v).pi = 2 max_nbr = {"C": 4, "Si": 4, "N": 3, "P": 3, "As": 3, "O": 2, "S": 2, "Se": 2, "F": 1, "Cl": 1, "Br": 1, "I": 1} for i, nbrs in mol.neighbors_iter(): atom = mol.atom(i) if len(nbrs) == 2 and all(bond.order == 2 for bond in nbrs.values()): atom.pi = 2 # sp (allene, ketene) if atom.symbol in max_nbr: h_cnt = max_nbr[atom.symbol] - len(nbrs) - atom.pi + atom.charge if h_cnt > 0: mol.atom(i).add_hydrogen(h_cnt) mol.descriptors.add("Valence")
Assign pi electron and hydrogens
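For instance, each ethylene carbon has one heavy-atom neighbour and pi = 1, so it receives 4 - 1 - 1 + 0 = 2 hydrogens (CH2=CH2), while the acetone carbonyl carbon has three neighbours and pi = 1, so it receives 4 - 3 - 1 + 0 = 0.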
def get_curline(): """Return the current python source line.""" if Frame: frame = Frame.get_selected_python_frame() if frame: line = '' f = frame.get_pyop() if f and not f.is_optimized_out(): cwd = os.path.join(os.getcwd(), '') fname = f.filename() if cwd in fname: fname = fname[len(cwd):] try: line = f.current_line() except IOError: pass if line: # Use repr(line) to avoid UnicodeDecodeError on the # following print invocation. line = repr(line).strip("'") line = line[:-2] if line.endswith(r'\n') else line return ('-> %s(%s): %s' % (fname, f.current_line_num(), line)) return ''
Return the current python source line.
def setUp(self, mfd_conf): ''' Input core configuration parameters as specified in the configuration file :param dict mfd_conf: Configuration file containing the following attributes: * 'Model_Type' - Choose between the 1st, 2nd or 3rd type of recurrence model {'First' | 'Second' | 'Third'} * 'Model_Weight' - Logic tree weight of model type (float) * 'MFD_spacing' - Width of MFD bin (float) * 'Minimum_Magnitude' - Minimum magnitude of activity rates (float) * 'b_value' - Tuple of (b-value, b-value uncertainty) * 'Maximum_Magnitude' - Maximum magnitude on fault (if not defined will use scaling relation) * 'Maximum_Magnitude_Uncertainty' - Uncertainty on maximum magnitude (If not defined and the MSR has a sigma term then this will be taken from sigma) ''' self.mfd_type = mfd_conf['Model_Type'] self.mfd_model = 'Anderson & Luco (Arbitrary) ' + self.mfd_type self.mfd_weight = mfd_conf['Model_Weight'] self.bin_width = mfd_conf['MFD_spacing'] self.mmin = mfd_conf['Minimum_Magnitude'] self.mmax = None self.mmax_sigma = None self.b_value = mfd_conf['b_value'][0] self.b_value_sigma = mfd_conf['b_value'][1] self.occurrence_rate = None
Input core configuration parameters as specified in the configuration file :param dict mfd_conf: Configuration file containing the following attributes: * 'Model_Type' - Choose between the 1st, 2nd or 3rd type of recurrence model {'First' | 'Second' | 'Third'} * 'Model_Weight' - Logic tree weight of model type (float) * 'MFD_spacing' - Width of MFD bin (float) * 'Minimum_Magnitude' - Minimum magnitude of activity rates (float) * 'b_value' - Tuple of (b-value, b-value uncertainty) * 'Maximum_Magnitude' - Maximum magnitude on fault (if not defined will use scaling relation) * 'Maximum_Magnitude_Uncertainty' - Uncertainty on maximum magnitude (If not defined and the MSR has a sigma term then this will be taken from sigma)
def deploy(stage, lambda_package, no_lambda, rebuild_deps, config_file): """Deploy the project to the given stage (defaults to the development stage).""" config = _load_config(config_file) if stage is None: stage = config['devstage'] s3 = boto3.client('s3') cfn = boto3.client('cloudformation') region = _get_aws_region() # obtain previous deployment if it exists previous_deployment = None try: previous_deployment = cfn.describe_stacks( StackName=config['name'])['Stacks'][0] except botocore.exceptions.ClientError: pass # build lambda package if required built_package = False new_package = True if lambda_package is None and not no_lambda: print("Building lambda package...") lambda_package = _build(config, rebuild_deps=rebuild_deps) built_package = True elif lambda_package is None: # preserve package from previous deployment new_package = False lambda_package = _get_from_stack(previous_deployment, 'Parameter', 'LambdaS3Key') # create S3 bucket if it doesn't exist yet bucket = config['aws']['s3_bucket'] _ensure_bucket_exists(s3, bucket, region) # upload lambda package to S3 if new_package: s3.upload_file(lambda_package, bucket, lambda_package) if built_package: # we created the package, so now that is on S3 we can delete it os.remove(lambda_package) # prepare cloudformation template template_body = get_cfn_template(config) parameters = [ {'ParameterKey': 'LambdaS3Bucket', 'ParameterValue': bucket}, {'ParameterKey': 'LambdaS3Key', 'ParameterValue': lambda_package}, ] stages = list(config['stage_environments'].keys()) stages.sort() for s in stages: param = s.title() + 'Version' if s != stage: v = _get_from_stack(previous_deployment, 'Parameter', param) \ if previous_deployment else '$LATEST' v = v or '$LATEST' else: v = '$LATEST' parameters.append({'ParameterKey': param, 'ParameterValue': v}) # run the cloudformation template if previous_deployment is None: print('Deploying {}:{}...'.format(config['name'], stage)) cfn.create_stack(StackName=config['name'], TemplateBody=template_body, Parameters=parameters, Capabilities=['CAPABILITY_IAM']) waiter = cfn.get_waiter('stack_create_complete') else: print('Updating {}:{}...'.format(config['name'], stage)) cfn.update_stack(StackName=config['name'], TemplateBody=template_body, Parameters=parameters, Capabilities=['CAPABILITY_IAM']) waiter = cfn.get_waiter('stack_update_complete') # wait for cloudformation to do its thing try: waiter.wait(StackName=config['name']) except botocore.exceptions.ClientError: # the update failed, so we remove the lambda package from S3 if built_package: s3.delete_object(Bucket=bucket, Key=lambda_package) raise else: if previous_deployment and new_package: # the update succeeded, so it is safe to delete the lambda package # used by the previous deployment old_pkg = _get_from_stack(previous_deployment, 'Parameter', 'LambdaS3Key') s3.delete_object(Bucket=bucket, Key=old_pkg) # we are done, show status info and exit _print_status(config)
Deploy the project to the given stage (defaults to the development stage).