def read_mnist_images(filename, dtype=None):
    """Read MNIST images from the original ubyte file format.

    Parameters
    ----------
    filename : str
        Filename/path from which to read images.
    dtype : 'float32', 'float64', or 'bool'
        If unspecified, images will be returned in their original
        unsigned byte format.

    Returns
    -------
    images : :class:`~numpy.ndarray`, shape (n_images, 1, n_rows, n_cols)
        An image array, with individual examples indexed along the
        first axis and the image dimensions along the second and
        third axis.

    Notes
    -----
    If the dtype provided was Boolean, the resulting array will
    be Boolean with `True` if the corresponding pixel had a value
    greater than or equal to 128, `False` otherwise.

    If the dtype provided was a float dtype, the values will be mapped to
    the unit interval [0, 1], with pixel values that were 255 in the
    original unsigned byte representation equal to 1.0.
    """
    with gzip.open(filename, 'rb') as f:
        magic, number, rows, cols = struct.unpack('>iiii', f.read(16))
        if magic != MNIST_IMAGE_MAGIC:
            raise ValueError("Wrong magic number reading MNIST image file")
        array = numpy.frombuffer(f.read(), dtype='uint8')
        array = array.reshape((number, 1, rows, cols))
    if dtype:
        dtype = numpy.dtype(dtype)
        if dtype.kind == 'b':
            # If the user wants Booleans, threshold at half the range.
            array = array >= 128
        elif dtype.kind == 'f':
            # Otherwise, just convert.
            array = array.astype(dtype)
            array /= 255.
        else:
            raise ValueError("Unknown dtype to convert MNIST to")
    return array
Read MNIST images from the original ubyte file format. Parameters ---------- filename : str Filename/path from which to read images. dtype : 'float32', 'float64', or 'bool' If unspecified, images will be returned in their original unsigned byte format. Returns ------- images : :class:`~numpy.ndarray`, shape (n_images, 1, n_rows, n_cols) An image array, with individual examples indexed along the first axis and the image dimensions along the second and third axis. Notes ----- If the dtype provided was Boolean, the resulting array will be Boolean with `True` if the corresponding pixel had a value greater than or equal to 128, `False` otherwise. If the dtype provided was a float dtype, the values will be mapped to the unit interval [0, 1], with pixel values that were 255 in the original unsigned byte representation equal to 1.0.
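A minimal usage sketch for the reader above. The module-level names it relies on (`gzip`, `struct`, `numpy`, `MNIST_IMAGE_MAGIC`) are supplied here as assumptions; the filename is the standard MNIST distribution name and the magic number 2051 is the documented idx3-ubyte image magic:

# Hypothetical usage; file name and magic constant are assumptions.
import gzip
import struct
import numpy

MNIST_IMAGE_MAGIC = 2051  # magic number of the idx3-ubyte image files

images = read_mnist_images('train-images-idx3-ubyte.gz', dtype='float32')
print(images.shape)  # e.g. (60000, 1, 28, 28) for the standard training set
print(images.max())  # 1.0 after scaling to the unit interval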
def get_user_id(self):
    'Returns "id" of a OneDrive user.'
    if self._user_id is None:
        self._user_id = self.get_user_data()['id']
    return self._user_id
Returns "id" of a OneDrive user.
def register(self, model, handler=None):
    """
    Register a permission handler for the model

    Parameters
    ----------
    model : django model class
        A django model class
    handler : permission handler class, string, or None
        A permission handler class or a dotted path

    Raises
    ------
    ImproperlyConfigured
        Raised when the model is an abstract model
    KeyError
        Raised when the model is already registered in the registry.
        The model cannot have more than one handler.
    """
    from permission.handlers import PermissionHandler
    if model._meta.abstract:
        raise ImproperlyConfigured(
            'The model %s is abstract, so it cannot be registered '
            'with permission.' % model)
    if model in self._registry:
        raise KeyError("A permission handler class is already "
                       "registered for '%s'" % model)
    if handler is None:
        handler = settings.PERMISSION_DEFAULT_PERMISSION_HANDLER
    if isstr(handler):
        handler = import_string(handler)
    if not inspect.isclass(handler):
        raise AttributeError(
            "`handler` attribute must be a class. "
            "An instance was specified.")
    if not issubclass(handler, PermissionHandler):
        raise AttributeError(
            "`handler` attribute must be a subclass of "
            "`permission.handlers.PermissionHandler`")
    # Instantiate the handler to save in the registry
    instance = handler(model)
    self._registry[model] = instance
Register a permission handler for the model Parameters ---------- model : django model class A django model class handler : permission handler class, string, or None A permission handler class or a dotted path Raises ------ ImproperlyConfigured Raised when the model is an abstract model KeyError Raised when the model is already registered in the registry. The model cannot have more than one handler.
def action_is_satisfied(action):
    '''
    Returns False if the parse would raise an error if no more arguments
    are given to this action, True otherwise.
    '''
    num_consumed_args = _num_consumed_args.get(action, 0)

    if action.nargs in [OPTIONAL, ZERO_OR_MORE, REMAINDER]:
        return True
    if action.nargs == ONE_OR_MORE:
        return num_consumed_args >= 1
    if action.nargs == PARSER:
        # Not sure what this should be, but this previously always returned
        # False so at least this won't break anything that wasn't already
        # broken.
        return False
    if action.nargs is None:
        return num_consumed_args == 1

    assert isinstance(action.nargs, int), \
        'failed to handle a possible nargs value: %r' % action.nargs
    return num_consumed_args == action.nargs
Returns False if the parse would raise an error if no more arguments are given to this action, True otherwise.
def _update_solution_data(self, s):
    """ Returns the voltage angle and generator set-point vectors.
    """
    x = s["x"]
    # Va_var = self.om.get_var("Va")
    # Vm_var = self.om.get_var("Vm")
    # Pg_var = self.om.get_var("Pg")
    # Qg_var = self.om.get_var("Qg")

    Va = x[self._Va.i1:self._Va.iN + 1]
    Vm = x[self._Vm.i1:self._Vm.iN + 1]
    Pg = x[self._Pg.i1:self._Pg.iN + 1]
    Qg = x[self._Qg.i1:self._Qg.iN + 1]

    # f = 0.5 * dot(x.T * HH, x) + dot(CC.T, x)
    # s["f"] = s["f"] + C0

    # Put the objective function value in the solution.
    # solution["f"] = f

    return Va, Vm, Pg, Qg
Returns the voltage angle and generator set-point vectors.
def split(self, verbose=None, end_in_new_line=None):
    """Save the elapsed time of the current split and restart the stopwatch.

    The current elapsed time will be appended to
    :attr:`split_elapsed_time`. If the stopwatch is paused, then it will
    remain paused. Otherwise, it will continue running.

    Parameters
    ----------
    verbose : Optional[bool]
        Whether to log. If `None`, use `verbose_end` set during
        initialization.
    end_in_new_line : Optional[bool]
        Whether to log the `description`. If `None`, use
        `end_in_new_line` set during initialization.
    """
    elapsed_time = self.get_elapsed_time()
    self.split_elapsed_time.append(elapsed_time)
    self._cumulative_elapsed_time += elapsed_time
    self._elapsed_time = datetime.timedelta()
    if verbose is None:
        verbose = self.verbose_end
    if verbose:
        if end_in_new_line is None:
            end_in_new_line = self.end_in_new_line
        if end_in_new_line:
            self.log("{} done in {}".format(self.description, elapsed_time))
        else:
            self.log(" done in {}".format(elapsed_time))
    self._start_time = datetime.datetime.now()
Save the elapsed time of the current split and restart the stopwatch. The current elapsed time will be appended to :attr:`split_elapsed_time`. If the stopwatch is paused, then it will remain paused. Otherwise, it will continue running. Parameters ---------- verbose : Optional[bool] Whether to log. If `None`, use `verbose_end` set during initialization. end_in_new_line : Optional[bool] Whether to log the `description`. If `None`, use `end_in_new_line` set during initialization.
def update_timestamps(self, chan_id, ts):
    """Updates the timestamp for the given channel id.

    :param chan_id: id of the channel to update
    :param ts: new timestamp
    :return: None
    """
    try:
        self.last_update[chan_id] = ts
    except KeyError:
        self.log.warning("Attempted ts update of channel %s, but channel "
                         "not present anymore.",
                         self.channel_directory[chan_id])
Updates the timestamp for the given channel id. :param chan_id: id of the channel to update :param ts: new timestamp :return: None
def _create_tokens_for_next_line_dent(self, newline_token):
    """
    Starting from a newline token that isn't followed by another newline
    token, returns any indent or dedent tokens that immediately follow.
    If indentation doesn't change, returns None.
    """
    indent_delta = self._get_next_line_indent_delta(newline_token)
    if indent_delta is None or indent_delta == 0:
        # Next line's indent isn't relevant OR there was no change in
        # indentation.
        return None

    dent_type = 'INDENT' if indent_delta > 0 else 'DEDENT'
    dent_token = _create_token(
        dent_type, '\t', newline_token.lineno + 1,
        newline_token.lexpos + len(newline_token.value))

    tokens = [dent_token] * abs(indent_delta)
    self.cur_indent += indent_delta
    return MultiToken(tokens)
Starting from a newline token that isn't followed by another newline token, returns any indent or dedent tokens that immediately follow. If indentation doesn't change, returns None.
def best_match(self, seqs, scan_rc=True):
    """
    give the best match of each motif in each sequence
    returns an iterator of nested lists containing tuples:
    (score, position, strand)
    """
    self.set_threshold(threshold=0.0)
    for matches in self.scan(seqs, 1, scan_rc):
        yield [m[0] for m in matches]
give the best match of each motif in each sequence returns an iterator of nested lists containing tuples: (score, position, strand)
def zip(*args, **kwargs):
    """ Returns a list of tuples, where the i-th tuple contains the i-th
        element from each of the argument sequences or iterables (or default
        if too short).
    """
    args = [list(iterable) for iterable in args]
    n = max(map(len, args))
    v = kwargs.get("default", None)
    return _zip(*[i + [v] * (n - len(i)) for i in args])
Returns a list of tuples, where the i-th tuple contains the i-th element from each of the argument sequences or iterables (or default if too short).
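Since this shadows the builtin, a short usage sketch helps. `_zip` is assumed to be a reference to the builtin `zip` saved before this definition, and the list return value matches Python 2 semantics:

# Hypothetical usage; assumes `_zip = zip` was saved beforehand and
# Python 2 semantics, where the builtin zip returns a list.
zip([1, 2, 3], "ab")            # [(1, 'a'), (2, 'b'), (3, None)]
zip([1, 2], "abc", default=0)   # [(1, 'a'), (2, 'b'), (0, 'c')]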
def check_stops(
    feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
    """
    Analog of :func:`check_agency` for ``feed.stops``.
    """
    table = "stops"
    problems = []

    # Preliminary checks
    if feed.stops is None:
        problems.append(["error", "Missing table", table, []])
    else:
        f = feed.stops.copy()
        problems = check_for_required_columns(problems, table, f)
    if problems:
        return format_problems(problems, as_df=as_df)

    if include_warnings:
        problems = check_for_invalid_columns(problems, table, f)

    # Check stop_id
    problems = check_column_id(problems, table, f, "stop_id")

    # Check stop_code, stop_desc, zone_id, parent_station
    for column in ["stop_code", "stop_desc", "zone_id", "parent_station"]:
        problems = check_column(
            problems, table, f, column, valid_str, column_required=False
        )

    # Check stop_name
    problems = check_column(problems, table, f, "stop_name", valid_str)

    # Check stop_lon and stop_lat
    if "location_type" in f.columns:
        requires_location = f["location_type"].isin([0, 1, 2])
    else:
        requires_location = True
    for column, bound in [("stop_lon", 180), ("stop_lat", 90)]:
        v = lambda x: pd.notnull(x) and -bound <= x <= bound
        cond = requires_location & ~f[column].map(v)
        problems = check_table(
            problems,
            table,
            f,
            cond,
            f"{column} out of bounds {[-bound, bound]}",
        )

    # Check stop_url
    problems = check_column(
        problems, table, f, "stop_url", valid_url, column_required=False
    )

    # Check location_type
    v = lambda x: x in range(5)
    problems = check_column(
        problems, table, f, "location_type", v, column_required=False
    )

    # Check stop_timezone
    problems = check_column(
        problems,
        table,
        f,
        "stop_timezone",
        valid_timezone,
        column_required=False,
    )

    # Check wheelchair_boarding
    v = lambda x: x in range(3)
    problems = check_column(
        problems, table, f, "wheelchair_boarding", v, column_required=False
    )

    # Check further location_type and parent_station
    if "parent_station" in f.columns:
        if "location_type" not in f.columns:
            problems.append(
                [
                    "error",
                    "parent_station column present but location_type column missing",
                    table,
                    [],
                ]
            )
        else:
            # Stations must have location type 1
            station_ids = f.loc[
                f["parent_station"].notnull(), "parent_station"
            ]
            cond = f["stop_id"].isin(station_ids) & (f["location_type"] != 1)
            problems = check_table(
                problems, table, f, cond, "A station must have location_type 1"
            )

            # Stations must not lie in stations
            cond = (f["location_type"] == 1) & f["parent_station"].notnull()
            problems = check_table(
                problems,
                table,
                f,
                cond,
                "A station must not lie in another station",
            )

            # Entrances (type 2), generic nodes (type 3) and boarding areas
            # (type 4) need to be part of a parent
            cond = (
                f["location_type"].isin([2, 3, 4])
                & f["parent_station"].isnull()
            )
            problems = check_table(
                problems,
                table,
                f,
                cond,
                "Entrances, nodes, and boarding areas must be part of a parent station",
            )

    if include_warnings:
        # Check for stops without trips
        s = feed.stop_times["stop_id"]
        cond = ~feed.stops["stop_id"].isin(s)
        problems = check_table(
            problems, table, f, cond, "Stop has no stop times", "warning"
        )

    return format_problems(problems, as_df=as_df)
Analog of :func:`check_agency` for ``feed.stops``.
def init_app(self, app):
    """Bind the app."""
    if app.config.MONGO_URIS and isinstance(app.config.MONGO_URIS, dict):
        self.MONGO_URIS = app.config.MONGO_URIS
        self.app = app
    else:
        raise ValueError(
            "nonstandard sanic config MONGO_URIS, MONGO_URIS must be a Dict[dbname, dburl]")

    @app.listener("before_server_start")
    async def init_mongo_connection(app, loop):
        for dbname, dburl in app.config.MONGO_URIS.items():
            if isinstance(dburl, str):
                db = MongoConnection(dburl, ioloop=loop).db
            else:
                db = MongoConnection(ioloop=loop, **dburl).db
            self.mongodbs[dbname] = db

    @app.listener("before_server_stop")
    async def sub_close(app, loop):
        log.info("mongo connections: {number}".format(number=len(self.mongodbs)))
        for dbname, db in self.mongodbs.items():
            db.client.close()
            log.info("{dbname} connection closed".format(dbname=dbname))

    if "extensions" not in app.__dir__():
        app.extensions = {}
    app.extensions['SanicMongo'] = self
    app.mongo = self.mongodbs
    return self
Bind the app.
def paginate_queryset(self, queryset, request, view=None):
    """
    adds `max_count` as a running tally of the largest table size.
    Used for calculating next/previous links later
    """
    result = super(MultipleModelLimitOffsetPagination, self).paginate_queryset(queryset, request, view)

    try:
        if self.max_count < self.count:
            self.max_count = self.count
    except AttributeError:
        self.max_count = self.count

    try:
        self.total += self.count
    except AttributeError:
        self.total = self.count

    return result
adds `max_count` as a running tally of the largest table size. Used for calculating next/previous links later
def is_interface_up(interface):
    """
    Checks if an interface is up.

    :param interface: interface name

    :returns: boolean
    """
    if sys.platform.startswith("linux"):
        if interface not in psutil.net_if_addrs():
            return False

        import fcntl
        SIOCGIFFLAGS = 0x8913
        try:
            with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
                result = fcntl.ioctl(s.fileno(), SIOCGIFFLAGS, interface + '\0' * 256)
                flags, = struct.unpack('H', result[16:18])
                if flags & 1:  # check if the up bit is set
                    return True
            return False
        except OSError as e:
            raise aiohttp.web.HTTPInternalServerError(
                text="Exception when checking if {} is up: {}".format(interface, e))
    else:
        # TODO: Windows & OSX support
        return True
Checks if an interface is up. :param interface: interface name :returns: boolean
def uploadfile(baseurl, filename, format_, token, nonce, cert, method=requests.post):
    """Uploads file (given by `filename`) to server at `baseurl`.

    `token` and `nonce` are string values that get passed as POST
    parameters.
    """
    filehash = sha1sum(filename)
    with open(filename, 'rb') as fileobj:
        files = {'filedata': fileobj}
        payload = {
            'sha1': filehash,
            'filename': os.path.basename(filename),
            'token': token,
            'nonce': nonce,
        }
        return method("%s/sign/%s" % (baseurl, format_),
                      files=files,
                      data=payload,
                      verify=cert)
Uploads file (given by `filename`) to server at `baseurl`. `token` and `nonce` are string values that get passed as POST parameters.
def _get_magnitude_term(self, C, mag):
    """
    Returns the magnitude scaling term - equation 3
    """
    if mag >= self.CONSTS["Mh"]:
        return C["e1"] + C["b3"] * (mag - self.CONSTS["Mh"])
    else:
        return C["e1"] + (C["b1"] * (mag - self.CONSTS["Mh"])) +\
            (C["b2"] * (mag - self.CONSTS["Mh"]) ** 2.)
Returns the magnitude scaling term - equation 3
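Transcribed directly from the code above (a rendering of the implementation, with the implementation's coefficient names, not a quotation of the published equation):

F_M(M) =
\begin{cases}
e_1 + b_1\,(M - M_h) + b_2\,(M - M_h)^2 & M < M_h \\
e_1 + b_3\,(M - M_h) & M \ge M_h
\end{cases}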
def post(self, object_type, object_id):
    """Add new tags to an object."""
    if object_id == 0:
        return Response(status=404)

    tagged_objects = []
    for name in request.get_json(force=True):
        if ':' in name:
            type_name = name.split(':', 1)[0]
            type_ = TagTypes[type_name]
        else:
            type_ = TagTypes.custom

        tag = db.session.query(Tag).filter_by(name=name, type=type_).first()
        if not tag:
            tag = Tag(name=name, type=type_)

        tagged_objects.append(
            TaggedObject(
                object_id=object_id,
                object_type=object_type,
                tag=tag,
            ),
        )

    db.session.add_all(tagged_objects)
    db.session.commit()

    return Response(status=201)
Add new tags to an object.
def express_route_ports_locations(self):
    """Instance depends on the API version:

    * 2018-08-01: :class:`ExpressRoutePortsLocationsOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRoutePortsLocationsOperations>`
    """
    api_version = self._get_api_version('express_route_ports_locations')
    if api_version == '2018-08-01':
        from .v2018_08_01.operations import ExpressRoutePortsLocationsOperations as OperationClass
    else:
        raise NotImplementedError("APIVersion {} is not available".format(api_version))
    return OperationClass(self._client, self.config,
                          Serializer(self._models_dict(api_version)),
                          Deserializer(self._models_dict(api_version)))
Instance depends on the API version: * 2018-08-01: :class:`ExpressRoutePortsLocationsOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRoutePortsLocationsOperations>`
def render(self, doc, context=None, math_option=False, img_path='',
           css_path=CSS_PATH):
    """Start thread to render a given documentation"""
    # If the thread is already running wait for it to finish before
    # starting it again.
    if self.wait():
        self.doc = doc
        self.context = context
        self.math_option = math_option
        self.img_path = img_path
        self.css_path = css_path
        # This causes run() to be executed in separate thread
        self.start()
Start thread to render a given documentation
def get_z(self, var, coords=None):
    """
    Get the vertical (z-) coordinate of a variable

    This method searches for the z-coordinate in the :attr:`ds`. It first
    checks whether there is one dimension that holds an ``'axis'``
    attribute with 'Z', otherwise it looks whether there is an intersection
    between the :attr:`z` attribute and the variables dimensions, otherwise
    it returns the coordinate corresponding to the third last dimension of
    `var` (or the second last or last if var is two or one-dimensional)

    Possible types
    --------------
    var: xarray.Variable
        The variable to get the z-coordinate for
    coords: dict
        Coordinates to use. If None, the coordinates of the dataset in the
        :attr:`ds` attribute are used.

    Returns
    -------
    xarray.Coordinate or None
        The z-coordinate or None if no z coordinate could be found"""
    coords = coords or self.ds.coords
    coord = self.get_variable_by_axis(var, 'z', coords)
    if coord is not None:
        return coord
    zname = self.get_zname(var)
    if zname is not None:
        return coords.get(zname)
    return None
Get the vertical (z-) coordinate of a variable This method searches for the z-coordinate in the :attr:`ds`. It first checks whether there is one dimension that holds an ``'axis'`` attribute with 'Z', otherwise it looks whether there is an intersection between the :attr:`z` attribute and the variables dimensions, otherwise it returns the coordinate corresponding to the third last dimension of `var` (or the second last or last if var is two or one-dimensional) Possible types -------------- var: xarray.Variable The variable to get the z-coordinate for coords: dict Coordinates to use. If None, the coordinates of the dataset in the :attr:`ds` attribute are used. Returns ------- xarray.Coordinate or None The z-coordinate or None if no z coordinate could be found
def add_label(self, query_params=None):
    '''
    Create a label for a board. Returns a new Label object.
    '''
    list_json = self.fetch_json(
        uri_path=self.base_uri + '/labels',
        http_method='POST',
        query_params=query_params or {}
    )
    return self.create_label(list_json)
Create a label for a board. Returns a new Label object.
def time_series(
        self,
        start_date='-30d',
        end_date='now',
        precision=None,
        distrib=None,
        tzinfo=None):
    """
    Returns a generator yielding tuples of ``(<datetime>, <value>)``.

    The data points will start at ``start_date``, and be at every time
    interval specified by ``precision``.
    ``distrib`` is a callable that accepts ``<datetime>`` and returns
    ``<value>``
    """
    start_date = self._parse_date_time(start_date, tzinfo=tzinfo)
    end_date = self._parse_date_time(end_date, tzinfo=tzinfo)

    if end_date < start_date:
        raise ValueError("`end_date` must be greater than `start_date`.")

    if precision is None:
        precision = (end_date - start_date) / 30

    precision = self._parse_timedelta(precision)

    if distrib is None:
        def distrib(dt):
            return self.generator.random.uniform(0, precision)  # noqa

    if not callable(distrib):
        raise ValueError(
            "`distrib` must be a callable. Got {} instead.".format(distrib))

    datapoint = start_date
    while datapoint < end_date:
        dt = timestamp_to_datetime(datapoint, tzinfo)
        datapoint += precision
        yield (dt, distrib(dt))
Returns a generator yielding tuples of ``(<datetime>, <value>)``. The data points will start at ``start_date``, and be at every time interval specified by ``precision``. ``distrib`` is a callable that accepts ``<datetime>`` and returns ``<value>``
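This matches the signature of Faker's date_time provider method; assuming that context, a short usage sketch:

# Hypothetical usage, assuming this is the Faker date_time provider method.
from faker import Faker

fake = Faker()
# One uniformly-random value per hour over the last week.
for dt, value in fake.time_series(start_date='-7d', end_date='now', precision=3600):
    print(dt, value)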
def from_sites(cls, sites, charge=None, validate_proximity=False,
               to_unit_cell=False):
    """
    Convenience constructor to make a Structure from a list of sites.

    Args:
        sites: Sequence of PeriodicSites. Sites must have the same
            lattice.
        validate_proximity (bool): Whether to check if there are sites
            that are less than 0.01 Ang apart. Defaults to False.
        to_unit_cell (bool): Whether to translate sites into the unit
            cell.

    Returns:
        (Structure) Note that missing properties are set as None.
    """
    if len(sites) < 1:
        raise ValueError("You need at least one site to construct a %s" % cls)
    prop_keys = []
    props = {}
    lattice = None
    for i, site in enumerate(sites):
        if not lattice:
            lattice = site.lattice
        elif site.lattice != lattice:
            raise ValueError("Sites must belong to the same lattice")
        for k, v in site.properties.items():
            if k not in prop_keys:
                prop_keys.append(k)
                props[k] = [None] * len(sites)
            props[k][i] = v
    for k, v in props.items():
        if any((vv is None for vv in v)):
            warnings.warn("Not all sites have property %s. Missing values "
                          "are set to None." % k)
    return cls(lattice, [site.species for site in sites],
               [site.frac_coords for site in sites],
               charge=charge,
               site_properties=props,
               validate_proximity=validate_proximity,
               to_unit_cell=to_unit_cell)
Convenience constructor to make a Structure from a list of sites. Args: sites: Sequence of PeriodicSites. Sites must have the same lattice. validate_proximity (bool): Whether to check if there are sites that are less than 0.01 Ang apart. Defaults to False. to_unit_cell (bool): Whether to translate sites into the unit cell. Returns: (Structure) Note that missing properties are set as None.
def _terminalSymbolsGenerator(self):
    """Generator of unique terminal symbols used for building the
    Generalized Suffix Tree. Unicode Private Use Area U+E000..U+F8FF is
    used to ensure that terminal symbols are not part of the input string.
    """
    py2 = sys.version[0] < '3'
    UPPAs = list(list(range(0xE000, 0xF8FF + 1)) +
                 list(range(0xF0000, 0xFFFFD + 1)) +
                 list(range(0x100000, 0x10FFFD + 1)))
    for i in UPPAs:
        if py2:
            yield(unichr(i))
        else:
            yield(chr(i))
    raise ValueError("Too many input strings.")
Generator of unique terminal symbols used for building the Generalized Suffix Tree. Unicode Private Use Area U+E000..U+F8FF is used to ensure that terminal symbols are not part of the input string.
def stream_url(self, item, *, device_id=None, quality='hi', session_token=None):
    """Get a URL to stream a podcast episode, library song, station_song, or store song.

    Note:
        Streaming requires a ``device_id`` from a valid, linked mobile device.

    Parameters:
        item (str): A podcast episode, library song, station_song, or store song.
            A Google Music subscription is required to stream store songs.
        device_id (str, Optional): A mobile device ID.
            Default: Use ``device_id`` of the :class:`MobileClient` instance.
        quality (str, Optional): Stream quality is one of ``'hi'`` (320Kbps),
            ``'med'`` (160Kbps), or ``'low'`` (128Kbps). Default: ``'hi'``.
        session_token (str): Session token from a station dict required for
            unsubscribed users to stream a station song.
            station['sessionToken'] as returned by :meth:`station` only exists
            for free accounts.

    Returns:
        str: A URL to an MP3 file.
    """
    if device_id is None:
        device_id = self.device_id

    if 'episodeId' in item:  # Podcast episode.
        response = self._call(
            mc_calls.PodcastEpisodeStreamURL,
            item['episodeId'],
            quality=quality,
            device_id=device_id
        )
    elif 'wentryid' in item:  # Free account station song.
        response = self._call(
            mc_calls.RadioStationTrackStreamURL,
            item['storeId'],
            item['wentryid'],
            session_token,
            quality=quality,
            device_id=device_id
        )
    elif 'trackId' in item:  # Playlist song.
        response = self._call(
            mc_calls.TrackStreamURL,
            item['trackId'],
            quality=quality,
            device_id=device_id
        )
    elif 'storeId' in item and self.is_subscribed:  # Store song.
        response = self._call(
            mc_calls.TrackStreamURL,
            item['storeId'],
            quality=quality,
            device_id=device_id
        )
    elif 'id' in item:  # Library song.
        response = self._call(
            mc_calls.TrackStreamURL,
            item['id'],
            quality=quality,
            device_id=device_id
        )
    else:
        # TODO: Create an exception for not being subscribed or use a
        # better builtin exception for this case.
        if 'storeId' in item and not self.is_subscribed:
            msg = "Can't stream a store song without a subscription."
        else:
            msg = "Item does not contain an ID field."
        raise ValueError(msg)

    try:
        stream_url = response.headers['Location']
    except KeyError:
        stream_url = response.body['url']

    return stream_url
Get a URL to stream a podcast episode, library song, station_song, or store song. Note: Streaming requires a ``device_id`` from a valid, linked mobile device. Parameters: item (str): A podcast episode, library song, station_song, or store song. A Google Music subscription is required to stream store songs. device_id (str, Optional): A mobile device ID. Default: Use ``device_id`` of the :class:`MobileClient` instance. quality (str, Optional): Stream quality is one of ``'hi'`` (320Kbps), ``'med'`` (160Kbps), or ``'low'`` (128Kbps). Default: ``'hi'``. session_token (str): Session token from a station dict required for unsubscribed users to stream a station song. station['sessionToken'] as returned by :meth:`station` only exists for free accounts. Returns: str: A URL to an MP3 file.
def get_filename(self, task, default_ext):
    """Set the path where the image will be saved.

    The default strategy is to use an increasing 6-digit number as
    the filename. You can override this method if you want to set custom
    naming rules. The file extension is kept if it can be obtained from
    the url, otherwise ``default_ext`` is used as extension.

    Args:
        task (dict): The task dict got from ``task_queue``.

    Output:
        Filename with extension.
    """
    url_path = urlparse(task['file_url'])[2]
    extension = url_path.split('.')[-1] if '.' in url_path else default_ext
    file_idx = self.fetched_num + self.file_idx_offset
    return '{:06d}.{}'.format(file_idx, extension)
Set the path where the image will be saved. The default strategy is to use an increasing 6-digit number as the filename. You can override this method if you want to set custom naming rules. The file extension is kept if it can be obtained from the url, otherwise ``default_ext`` is used as extension. Args: task (dict): The task dict got from ``task_queue``. Output: Filename with extension.
def get_consistent_resource(self):
    """
    :return: a payment that you can trust.
    :rtype: Payment
    """
    http_client = HttpClient()
    response, _ = http_client.get(routes.url(routes.PAYMENT_RESOURCE, resource_id=self.id))
    return Payment(**response)
:return: a payment that you can trust. :rtype: Payment
def write_grid_tpl(name, tpl_file, suffix, zn_array=None, shape=None,
                   spatial_reference=None, longnames=False):
    """ write a grid-based template file

    Parameters
    ----------
    name : str
        the base parameter name
    tpl_file : str
        the template file to write - include path
    suffix : str
        suffix appended to parameter names
    zn_array : numpy.ndarray
        an array used to skip inactive cells
    shape : tuple
        the shape of the grid; required if zn_array is not passed
    spatial_reference : spatial reference
        used to record cell-center x/y coordinates
    longnames : bool
        flag to use long parameter names

    Returns
    -------
    df : pandas.DataFrame
        a dataframe with parameter information

    """
    if shape is None and zn_array is None:
        raise Exception("must pass either zn_array or shape")
    elif shape is None:
        shape = zn_array.shape

    parnme, x, y = [], [], []
    with open(tpl_file, 'w') as f:
        f.write("ptf ~\n")
        for i in range(shape[0]):
            for j in range(shape[1]):
                if zn_array is not None and zn_array[i, j] < 1:
                    pname = ' 1.0 '
                else:
                    if longnames:
                        pname = "{0}_i:{1}_j:{2}_{3}".format(name, i, j, suffix)
                        if spatial_reference is not None:
                            pname += "_x:{0:10.2E}_y:{1:10.2E}".format(
                                spatial_reference.xcentergrid[i, j],
                                spatial_reference.ycentergrid[i, j])
                    else:
                        pname = "{0}{1:03d}{2:03d}".format(name, i, j)
                        if len(pname) > 12:
                            raise Exception("grid pname too long:{0}".format(pname))
                    parnme.append(pname)
                    pname = ' ~ {0} ~ '.format(pname)
                    if spatial_reference is not None:
                        x.append(spatial_reference.xcentergrid[i, j])
                        y.append(spatial_reference.ycentergrid[i, j])
                f.write(pname)
            f.write("\n")
    df = pd.DataFrame({"parnme": parnme}, index=parnme)
    if spatial_reference is not None:
        df.loc[:, 'x'] = x
        df.loc[:, 'y'] = y
    df.loc[:, "pargp"] = "{0}{1}".format(suffix.replace('_', ''), name)
    df.loc[:, "tpl"] = tpl_file
    return df
write a grid-based template file Parameters ---------- name : str the base parameter name tpl_file : str the template file to write - include path suffix : str suffix appended to parameter names zn_array : numpy.ndarray an array used to skip inactive cells shape : tuple the shape of the grid; required if zn_array is not passed spatial_reference : spatial reference used to record cell-center x/y coordinates longnames : bool flag to use long parameter names Returns ------- df : pandas.DataFrame a dataframe with parameter information
def abs_timedelta(delta):
    """Returns an "absolute" value for a timedelta, always representing a
    time distance."""
    if delta.days < 0:
        now = _now()
        return now - (now + delta)
    return delta
Returns an "absolute" value for a timedelta, always representing a time distance.
def attributes_js(cls, attributes):
    """
    Generates JS code to look up attributes on JS objects from
    an attributes specification dictionary. If the specification
    references a particular plotting handle it will also generate
    JS code to get the ID of the object.

    Simple example (when referencing cb_data or cb_obj):

    Input  : {'x': 'cb_data.geometry.x'}

    Output : data['x'] = cb_data['geometry']['x']

    Example referencing plot handle:

    Input  : {'x0': 'x_range.attributes.start'}

    Output : if ((x_range !== undefined)) {
               data['x0'] = {id: x_range['id'], value:
                             x_range['attributes']['start']}
             }
    """
    assign_template = '{assign}{{id: {obj_name}["id"], value: {obj_name}{attr_getters}}};\n'
    conditional_template = 'if (({obj_name} != undefined)) {{ {assign} }}'
    code = ''
    for key, attr_path in sorted(attributes.items()):
        data_assign = 'data["{key}"] = '.format(key=key)
        attrs = attr_path.split('.')
        obj_name = attrs[0]
        attr_getters = ''.join(['["{attr}"]'.format(attr=attr)
                                for attr in attrs[1:]])
        if obj_name not in ['cb_obj', 'cb_data']:
            assign_str = assign_template.format(
                assign=data_assign, obj_name=obj_name, attr_getters=attr_getters
            )
            code += conditional_template.format(
                obj_name=obj_name, assign=assign_str
            )
        else:
            assign_str = ''.join([data_assign, obj_name, attr_getters, ';\n'])
            code += assign_str
    return code
Generates JS code to look up attributes on JS objects from an attributes specification dictionary. If the specification references a particular plotting handle it will also generate JS code to get the ID of the object. Simple example (when referencing cb_data or cb_obj): Input : {'x': 'cb_data.geometry.x'} Output : data['x'] = cb_data['geometry']['x'] Example referencing plot handle: Input : {'x0': 'x_range.attributes.start'} Output : if ((x_range !== undefined)) { data['x0'] = {id: x_range['id'], value: x_range['attributes']['start']} }
def linspace(start, stop, num, decimals=18):
    """ Returns a list of evenly spaced numbers over a specified interval.

    Inspired from Numpy's linspace function:
    https://github.com/numpy/numpy/blob/master/numpy/core/function_base.py

    :param start: starting value
    :type start: float
    :param stop: end value
    :type stop: float
    :param num: number of samples to generate
    :type num: int
    :param decimals: number of decimal places to round to
    :type decimals: int
    :return: a list of equally spaced numbers
    :rtype: list
    """
    start = float(start)
    stop = float(stop)
    if abs(start - stop) <= 10e-8:
        return [start]
    num = int(num)
    if num > 1:
        div = num - 1
        delta = stop - start
        return [float(("{:." + str(decimals) + "f}").format(
            (start + (float(x) * float(delta) / float(div))))) for x in range(num)]
    return [float(("{:." + str(decimals) + "f}").format(start))]
Returns a list of evenly spaced numbers over a specified interval. Inspired from Numpy's linspace function: https://github.com/numpy/numpy/blob/master/numpy/core/function_base.py :param start: starting value :type start: float :param stop: end value :type stop: float :param num: number of samples to generate :type num: int :param decimals: number of decimal places to round to :type decimals: int :return: a list of equally spaced numbers :rtype: list
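A short usage sketch:

# Five evenly spaced samples on [0, 1], rounded to 2 decimal places.
print(linspace(0.0, 1.0, 5, decimals=2))
# [0.0, 0.25, 0.5, 0.75, 1.0]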
def setup_signals(self):
    """Connect the signals with the slots to make the ui functional

    :returns: None
    :rtype: None
    :raises: None
    """
    self.duplicate_tb.clicked.connect(self.duplicate)
    self.delete_tb.clicked.connect(self.delete)
    self.load_tb.clicked.connect(self.load)
    self.unload_tb.clicked.connect(self.unload)
    self.reference_tb.clicked.connect(self.reference)
    self.importtf_tb.clicked.connect(self.import_file)
    self.importref_tb.clicked.connect(self.import_reference)
    self.replace_tb.clicked.connect(self.replace)
    self.imported_tb.clicked.connect(partial(self.toggle_tbstyle, button=self.imported_tb))
    self.alien_tb.clicked.connect(partial(self.toggle_tbstyle, button=self.alien_tb))
Connect the signals with the slots to make the ui functional :returns: None :rtype: None :raises: None
def _get_stddevs(self, C, stddev_types, num_sites, mag):
    """
    Returns standard deviation as defined in equation 23, page 2291
    (Tavakoli and Pezeshk, 2005)
    """
    stddevs = []
    sigma = (C['c14'] + C['c15'] * mag) if mag < 7.2 else C['c16']
    vals = sigma * np.ones(num_sites)
    for _ in stddev_types:
        stddevs.append(vals)
    return stddevs
Returns standard deviation as defined in equation 23, page 2291 (Tavakoli and Pezeshk, 2005)
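Transcribed from the implementation above (the coefficient names are the code's; consult Tavakoli and Pezeshk, 2005 for the published form):

\sigma =
\begin{cases}
c_{14} + c_{15}\,M & M < 7.2 \\
c_{16} & M \ge 7.2
\end{cases}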
def sendpfast(x, pps=None, mbps=None, realtime=None, loop=0, file_cache=False, iface=None, verbose=True):
    """Send packets at layer 2 using tcpreplay for performance

    pps:  packets per second
    mbps: MBits per second
    realtime: use packet's timestamp, bending time with realtime value
    loop: number of times to process the packet list
    file_cache: cache packets in RAM instead of reading from disk at each iteration
    iface: output interface
    verbose: if False, discard tcpreplay output
    """
    if iface is None:
        iface = conf.iface
    argv = [conf.prog.tcpreplay, "--intf1=%s" % iface]
    if pps is not None:
        argv.append("--pps=%i" % pps)
    elif mbps is not None:
        argv.append("--mbps=%f" % mbps)
    elif realtime is not None:
        argv.append("--multiplier=%i" % realtime)
    else:
        argv.append("--topspeed")

    if not verbose:
        argv.append("-q")

    if loop:
        argv.append("--loop=%i" % loop)
    if file_cache:
        argv.append("--enable-file-cache")

    f = get_temp_file()
    argv.append(f)
    wrpcap(f, x)
    with open(os.devnull, "wb") as null:
        proc_output = null if not verbose else None
        try:
            subprocess.check_call(argv, stdout=proc_output, stderr=proc_output)
        except KeyboardInterrupt:
            log_interactive.info("Interrupted by user")
        except Exception as e:
            log_interactive.error("while trying to exec [%s]: %s" % (argv[0], e))
        finally:
            os.unlink(f)
Send packets at layer 2 using tcpreplay for performance pps: packets per second mbps: MBits per second realtime: use packet's timestamp, bending time with realtime value loop: number of times to process the packet list file_cache: cache packets in RAM instead of reading from disk at each iteration iface: output interface verbose: if False, discard tcpreplay output
def extract_common_fields(self, data):
    """Extract fields from a basic user query."""
    email = None
    for curr_email in data.get("emails", []):
        email = email or curr_email.get("email")
        if curr_email.get("verified", False) and \
                curr_email.get("primary", False):
            email = curr_email.get("email")
    return dict(
        email=email,
        id=data.get('id'),
        name=data.get('name'),
        first_name=data.get('first_name'),
        last_name=data.get('last_name'),
        image_url=data.get('image_url')
    )
Extract fields from a basic user query.
def sphere_pos(self, x):
    """Sphere (squared norm) test objective function"""
    # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
    c = 0.0
    if x[0] < c:
        return np.nan
    return -c**2 + sum((x + 0)**2)
Sphere (squared norm) test objective function
def get_location(self, callb=None):
    """Convenience method to request the location from the device

    This method will check whether the value has already been retrieved
    from the device, if so, it will simply return it. If no, it will
    request the information from the device and request that callb be
    executed when a response is received. The default callback will
    simply cache the value.

    :param callb: Callable to be used when the response is received. If not set,
                  self.resp_set_location will be used.
    :type callb: callable
    :returns: The cached value
    :rtype: str
    """
    if self.location is None:
        mypartial = partial(self.resp_set_location)
        if callb:
            mycallb = lambda x, y: (mypartial(y), callb(x, y))
        else:
            mycallb = lambda x, y: mypartial(y)
        response = self.req_with_resp(GetLocation, StateLocation, callb=mycallb)
    return self.location
Convenience method to request the location from the device This method will check whether the value has already been retrieved from the device, if so, it will simply return it. If no, it will request the information from the device and request that callb be executed when a response is received. The default callback will simply cache the value. :param callb: Callable to be used when the response is received. If not set, self.resp_set_location will be used. :type callb: callable :returns: The cached value :rtype: str
def get_host_lock(url):
    """Get lock object for given URL host."""
    hostname = get_hostname(url)
    return host_locks.setdefault(hostname, threading.Lock())
Get lock object for given URL host.
def merge(self, conflicted, tables=[], diff_only=True):
    """
    Merges specific **tables** or all tables of **conflicted** database into the master database.

    Parameters
    ----------
    conflicted: str
        The path of the SQL database to be merged into the master.
    tables: list (optional)
        The list of tables to merge. If None, all tables are merged.
    diff_only: bool
        If True, only prints the differences of each table and doesn't actually merge anything.

    """
    if os.path.isfile(conflicted):
        # Load and attach master and conflicted databases
        con, master, reassign = Database(conflicted), self.list("PRAGMA database_list").fetchall()[0][2], {}
        con.modify("ATTACH DATABASE '{}' AS m".format(master), verbose=False)
        self.modify("ATTACH DATABASE '{}' AS c".format(conflicted), verbose=False)
        con.modify("ATTACH DATABASE '{}' AS c".format(conflicted), verbose=False)
        self.modify("ATTACH DATABASE '{}' AS m".format(master), verbose=False)

        # Drop any backup tables from failed merges
        for table in tables:
            self.modify("DROP TABLE IF EXISTS Backup_{0}".format(table), verbose=False)

        # Gather user data to add to CHANGELOG table
        import socket, datetime
        if not diff_only:
            user = get_input('Please enter your name : ')
            machine_name = socket.gethostname()
            date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
            modified_tables = []

        # Merge table by table, starting with SOURCES
        if not isinstance(tables, type(list())):
            tables = [tables]
        tables = tables or ['sources'] + [t for t in zip(*self.list(
            "SELECT * FROM sqlite_master WHERE name NOT LIKE '%Backup%' AND name!='sqlite_sequence' AND type='table'{}".format(
                " AND name IN ({})".format("'" + "','".join(tables) + "'") if tables else '')))[1] if t != 'sources']
        for table in tables:
            # Get column names and data types from master table and column names from conflicted table
            metadata = self.query("PRAGMA table_info({})".format(table), fmt='table')
            columns, types, constraints = [np.array(metadata[n]) for n in ['name', 'type', 'notnull']]
            # columns, types, constraints = self.query("PRAGMA table_info({})".format(table), unpack=True)[1:4]
            conflicted_cols = con.query("PRAGMA table_info({})".format(table), unpack=True)[1]

            if any([i not in columns for i in conflicted_cols]):
                # Abort table merge if conflicted has new columns not present in master.
                # New columns must be added to the master database first via db.edit_columns().
                print(
                    "\nMerge of {0} table aborted since conflicted copy has columns {1} not present in master.\nAdd new columns to master with astrodb.table() method and try again.\n".format(
                        table.upper(), [i for i in conflicted_cols if i not in columns]))
            else:
                # Add new columns from master table to conflicted table if necessary
                if any([i not in conflicted_cols for i in columns]):
                    con.modify("DROP TABLE IF EXISTS Conflicted_{0}".format(table))
                    con.modify("ALTER TABLE {0} RENAME TO Conflicted_{0}".format(table))
                    # TODO: Update to allow multiple primary and foreign keys
                    con.modify("CREATE TABLE {0} ({1})".format(table, ', '.join(
                        ['{} {} {}{}'.format(c, t, r, ' UNIQUE PRIMARY KEY' if c == 'id' else '')
                         for c, t, r in zip(columns, types, constraints * ['NOT NULL'])])))
                    con.modify("INSERT INTO {0} ({1}) SELECT {1} FROM Conflicted_{0}".format(
                        table, ','.join(conflicted_cols)))
                    con.modify("DROP TABLE Conflicted_{0}".format(table))

                # Pull unique records from conflicted table
                data = map(list, con.list(
                    "SELECT * FROM (SELECT 1 AS db, {0} FROM m.{2} UNION ALL SELECT 2 AS db, {0} FROM c.{2}) GROUP BY {1} HAVING COUNT(*)=1 AND db=2".format(
                        ','.join(columns), ','.join(columns[1:]), table)).fetchall())

                if data:
                    # Just print the table differences
                    if diff_only:
                        pprint(zip(*data)[1:], names=columns, title='New {} records'.format(table.upper()))

                    # Add new records to the master and then clean up tables
                    else:
                        # Make temporary table copy so changes can be undone at any time
                        self.modify("DROP TABLE IF EXISTS Backup_{0}".format(table), verbose=False)
                        self.modify("ALTER TABLE {0} RENAME TO Backup_{0}".format(table), verbose=False)
                        self.modify("CREATE TABLE {0} ({1})".format(table, ', '.join(
                            ['{} {} {}{}'.format(c, t, r, ' UNIQUE PRIMARY KEY' if c == 'id' else '')
                             for c, t, r in zip(columns, types, constraints * ['NOT NULL'])])), verbose=False)
                        self.modify(
                            "INSERT INTO {0} ({1}) SELECT {1} FROM Backup_{0}".format(table, ','.join(columns)),
                            verbose=False)

                        # Create a dictionary of any reassigned ids from merged SOURCES tables
                        # and replace applicable source_ids in other tables.
                        print("\nMerging {} tables.\n".format(table.upper()))
                        try:
                            count = self.query("SELECT MAX(id) FROM {}".format(table), fetch='one')[0] + 1
                        except TypeError:
                            count = 1
                        for n, i in enumerate([d[1:] for d in data]):
                            if table == 'sources':
                                reassign[i[0]] = count
                            elif 'source_id' in columns and i[1] in reassign.keys():
                                i[1] = reassign[i[1]]
                            else:
                                pass
                            i[0] = count
                            data[n] = i
                            count += 1

                        # Insert unique records into master
                        for d in data:
                            self.modify(
                                "INSERT INTO {} VALUES({})".format(table, ','.join(['?' for c in columns])), d,
                                verbose=False)
                        pprint(zip(*data), names=columns,
                               title="{} records added to {} table at '{}':".format(len(data), table, master))

                        # Run clean_up on the table to check for conflicts
                        abort = self.clean_up(table)

                        # Undo all changes to table if merge is aborted.
                        # Otherwise, push table changes to master.
                        if abort:
                            self.modify("DROP TABLE {0}".format(table), verbose=False)
                            self.modify("ALTER TABLE Backup_{0} RENAME TO {0}".format(table), verbose=False)
                        else:
                            self.modify("DROP TABLE Backup_{0}".format(table), verbose=False)
                            modified_tables.append(table.upper())

                else:
                    print("\n{} tables identical.".format(table.upper()))

        # Add data to CHANGELOG table
        if not diff_only:
            user_description = get_input('\nPlease describe the changes made in this merge: ')
            self.list("INSERT INTO changelog VALUES(?, ?, ?, ?, ?, ?, ?)",
                      (None, date, str(user), machine_name, ', '.join(modified_tables),
                       user_description, os.path.basename(conflicted)))

        # Finish up and detach
        if diff_only:
            print("\nDiff complete. No changes made to either database. Set `diff_only=False' to apply merge.")
        else:
            print("\nMerge complete!")
        con.modify("DETACH DATABASE c", verbose=False)
        self.modify("DETACH DATABASE c", verbose=False)
        con.modify("DETACH DATABASE m", verbose=False)
        self.modify("DETACH DATABASE m", verbose=False)
    else:
        print("File '{}' not found!".format(conflicted))
Merges specific **tables** or all tables of **conflicted** database into the master database. Parameters ---------- conflicted: str The path of the SQL database to be merged into the master. tables: list (optional) The list of tables to merge. If None, all tables are merged. diff_only: bool If True, only prints the differences of each table and doesn't actually merge anything.
def validate_data_table(data_table, sed=None):
    """
    Validate all columns of a data table. If a list of tables is passed,
    all tables will be validated and then concatenated

    Parameters
    ----------
    data_table : `astropy.table.Table` or list of `astropy.table.Table`.

    sed : bool, optional
        Whether to convert the fluxes to SED. If unset, all data tables are
        converted to the format of the first data table.
    """
    if isinstance(data_table, Table) or isinstance(data_table, QTable):
        data_table = [data_table]

    try:
        for dt in data_table:
            if not isinstance(dt, Table) and not isinstance(dt, QTable):
                raise TypeError(
                    "An object passed as data_table is not an astropy Table!"
                )
    except TypeError:
        raise TypeError(
            "Argument passed to validate_data_table is not a table and "
            "not a list"
        )

    def dt_sed_conversion(dt, sed):
        f_unit, sedf = sed_conversion(dt["energy"], dt["flux"].unit, sed)
        # roundtrip to Table to change the units
        t = Table(dt)
        for col in ["flux", "flux_error_lo", "flux_error_hi"]:
            t[col].unit = f_unit
        ndt = QTable(t)
        ndt["flux"] = (dt["flux"] * sedf).to(f_unit)
        ndt["flux_error_lo"] = (dt["flux_error_lo"] * sedf).to(f_unit)
        ndt["flux_error_hi"] = (dt["flux_error_hi"] * sedf).to(f_unit)
        return ndt

    data_list = []
    for group, dt in enumerate(data_table):
        dt_val = _validate_single_data_table(dt, group=group)
        data_list.append(dt_val)

    # concatenate input data tables
    data_new = data_list[0].copy()
    f_pt = data_new["flux"].unit.physical_type
    if sed is None:
        sed = f_pt in ["flux", "power"]
    data_new = dt_sed_conversion(data_new, sed)

    for dt in data_list[1:]:
        nf_pt = dt["flux"].unit.physical_type
        if ("flux" in nf_pt and "power" in f_pt) or (
            "power" in nf_pt and "flux" in f_pt
        ):
            raise TypeError(
                "The physical types of the data tables could not be "
                "matched: Some are in flux and others in luminosity units"
            )
        dt = dt_sed_conversion(dt, sed)
        for row in dt:
            data_new.add_row(row)

    return data_new
Validate all columns of a data table. If a list of tables is passed, all tables will be validated and then concatenated Parameters ---------- data_table : `astropy.table.Table` or list of `astropy.table.Table`. sed : bool, optional Whether to convert the fluxes to SED. If unset, all data tables are converted to the format of the first data table.
def render_footer(self, ctx, data):
    """
    Render any required static content in the footer, from the
    C{staticContent} attribute of this page.
    """
    if self.staticContent is None:
        return ctx.tag
    footer = self.staticContent.getFooter()
    if footer is not None:
        return ctx.tag[footer]
    else:
        return ctx.tag
Render any required static content in the footer, from the C{staticContent} attribute of this page.
def id_lookup(paper_id, idtype):
    """Take an ID of type PMID, PMCID, or DOI and lookup the other IDs.

    If the DOI is not found in Pubmed, try to obtain the DOI by doing a
    reverse-lookup of the DOI in CrossRef using article metadata.

    Parameters
    ----------
    paper_id : str
        ID of the article.
    idtype : str
        Type of the ID: 'pmid', 'pmcid', or 'doi'

    Returns
    -------
    ids : dict
        A dictionary with the following keys: pmid, pmcid and doi.
    """
    if idtype not in ('pmid', 'pmcid', 'doi'):
        raise ValueError("Invalid idtype %s; must be 'pmid', 'pmcid', "
                         "or 'doi'." % idtype)
    ids = {'doi': None, 'pmid': None, 'pmcid': None}
    pmc_id_results = pmc_client.id_lookup(paper_id, idtype)
    # Start with the results of the PMC lookup and then override with the
    # provided ID
    ids['pmid'] = pmc_id_results.get('pmid')
    ids['pmcid'] = pmc_id_results.get('pmcid')
    ids['doi'] = pmc_id_results.get('doi')
    ids[idtype] = paper_id
    # If we gave a DOI, then our work is done after looking for PMID and
    # PMCID
    if idtype == 'doi':
        return ids
    # If we gave a PMID or PMCID, we need to check to see if we got a DOI.
    # If we got a DOI back, we're done.
    elif ids.get('doi'):
        return ids
    # If we get here, then we've given PMID or PMCID and don't have a DOI
    # yet. If we gave a PMCID and have neither a PMID nor a DOI, then we'll
    # run into problems later on when we try to do the reverse lookup using
    # CrossRef. So we bail here and return what we have (PMCID only) with a
    # warning.
    if ids.get('pmcid') and ids.get('doi') is None and ids.get('pmid') is None:
        logger.warning('%s: PMCID without PMID or DOI' % ids.get('pmcid'))
        return ids
    # To clarify the state of things at this point:
    assert ids.get('pmid') is not None
    assert ids.get('doi') is None
    # As a last resort, we try to get the DOI from CrossRef (which internally
    # tries to get the DOI from Pubmed in the process of collecting the
    # necessary metadata for the lookup):
    ids['doi'] = crossref_client.doi_query(ids['pmid'])
    # It may still be None, but at this point there's nothing we can do...
    return ids
Take an ID of type PMID, PMCID, or DOI and lookup the other IDs. If the DOI is not found in Pubmed, try to obtain the DOI by doing a reverse-lookup of the DOI in CrossRef using article metadata. Parameters ---------- paper_id : str ID of the article. idtype : str Type of the ID: 'pmid', 'pmcid', or 'doi' Returns ------- ids : dict A dictionary with the following keys: pmid, pmcid and doi.
def get_mime_message(subject, text):
    """Creates MIME message

    :param subject: Subject of email
    :param text: Email content
    :return: Email formatted as HTML ready to be sent
    """
    message = MIMEText(
        "<html>" + str(text).replace("\n", "<br>") + "</html>", "html"
    )
    message["subject"] = str(subject)
    return message
Creates MIME message :param subject: Subject of email :param text: Email content :return: Email formatted as HTML ready to be sent
def get_cache_key(brain_or_object):
    """Generate a cache key for a common brain or object

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: Cache Key
    :rtype: str
    """
    key = [
        get_portal_type(brain_or_object),
        get_id(brain_or_object),
        get_uid(brain_or_object),
        # handle different domains gracefully
        get_url(brain_or_object),
        # Return the microsecond since the epoch in GMT
        get_modification_date(brain_or_object).micros(),
    ]
    return "-".join(map(lambda x: str(x), key))
Generate a cache key for a common brain or object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Cache Key :rtype: str
def new_portfolio(self, portfolio_cookie=None):
    '''
    Create a portfolio based on self.user_cookie.

    :return: the newly created QA_Portfolio if it does not already exist;
             otherwise the existing portfolio.
    '''
    _portfolio = QA_Portfolio(
        user_cookie=self.user_cookie,
        portfolio_cookie=portfolio_cookie
    )

    if _portfolio.portfolio_cookie not in self.portfolio_list.keys():
        self.portfolio_list[_portfolio.portfolio_cookie] = _portfolio
        return _portfolio
    else:
        print(
            " portfolio with user_cookie ",
            self.user_cookie,
            " already exists!!"
        )
        return self.portfolio_list[_portfolio.portfolio_cookie]
Create a portfolio based on self.user_cookie. :return: the newly created QA_Portfolio if it does not already exist; otherwise the existing portfolio.
def _api_handler(self, *args, **kwargs):
    """
    Thin wrapper around api_handler from `indicoio.utils.api`
    to add in stored keyword arguments to the JSON body
    """
    keyword_arguments = {}
    keyword_arguments.update(self.keywords)
    keyword_arguments.update(kwargs)
    return api_handler(*args, **keyword_arguments)
Thin wrapper around api_handler from `indicoio.utils.api` to add in stored keyword arguments to the JSON body
def display_grid_scores(grid_scores, top=None):
    """Helper function to format a report on a grid of scores"""
    grid_scores = sorted(grid_scores, key=lambda x: x[1], reverse=True)
    if top is not None:
        grid_scores = grid_scores[:top]

    # Compute a threshold for starring models with overlapping stderr:
    _, best_mean, best_scores = grid_scores[0]
    threshold = best_mean - 2 * sem(best_scores)

    for params, mean_score, scores in grid_scores:
        append_star = mean_score + 2 * sem(scores) > threshold
        print(display_scores(params, scores, append_star=append_star))
Helper function to format a report on a grid of scores
def _get_nameservers(domain):
    """
    Looks for domain nameservers and returns the IPs of the nameservers
    as a list. The list is empty if no nameservers were found. The
    associated domain zone name is needed for the lookup.
    """
    nameservers = []
    rdtypes_ns = ['SOA', 'NS']
    rdtypes_ip = ['A', 'AAAA']
    for rdtype_ns in rdtypes_ns:
        for rdata_ns in Provider._dns_lookup(domain, rdtype_ns):
            for rdtype_ip in rdtypes_ip:
                for rdata_ip in Provider._dns_lookup(
                        rdata_ns.to_text().split(' ')[0], rdtype_ip):
                    if rdata_ip.to_text() not in nameservers:
                        nameservers.append(rdata_ip.to_text())
    LOGGER.debug('DNS Lookup => %s IN NS %s', domain, ' '.join(nameservers))
    return nameservers
Looks for domain nameservers and returns the IPs of the nameservers as a list. The list is empty if no nameservers were found. The associated domain zone name is needed for the lookup.
def get_fastq_files(directory, lane, fc_name):
    """Retrieve fastq files for the given lane, ready to process.
    """
    files = glob.glob(os.path.join(directory, "%s_*%s*txt*" % (lane, fc_name)))
    files.sort()
    if len(files) > 2 or len(files) == 0:
        raise ValueError("Did not find correct files for %s %s %s %s" %
                         (directory, lane, fc_name, files))
    ready_files = []
    for fname in files:
        if fname.endswith(".gz"):
            cl = ["gunzip", fname]
            subprocess.check_call(cl)
            ready_files.append(os.path.splitext(fname)[0])
        else:
            ready_files.append(fname)
    return ready_files[0], (ready_files[1] if len(ready_files) > 1 else None)
Retrieve fastq files for the given lane, ready to process.
def visit_tryfinally(self, node, parent):
    """visit a TryFinally node by returning a fresh instance of it"""
    newnode = nodes.TryFinally(node.lineno, node.col_offset, parent)
    newnode.postinit(
        [self.visit(child, newnode) for child in node.body],
        [self.visit(n, newnode) for n in node.finalbody],
    )
    return newnode
visit a TryFinally node by returning a fresh instance of it
def _set_prior(self, prior):
    """Set prior for this parameter. The prior must be a function accepting
    the current value of the parameter as input and giving the probability
    density as output."""
    if prior is None:
        # Removing prior
        self._prior = None
    else:
        # Try and call the prior with the current value of the parameter
        try:
            _ = prior(self.value)
        except Exception:
            raise NotCallableOrErrorInCall(
                "Could not call the provided prior. " +
                "Is it a function accepting the current value of the parameter?")
        try:
            prior.set_units(self.unit, u.dimensionless_unscaled)
        except AttributeError:
            raise NotCallableOrErrorInCall(
                "It looks like the provided prior is not an astromodels function.")
        self._prior = prior
Set prior for this parameter. The prior must be a function accepting the current value of the parameter as input and giving the probability density as output.
def consecutiveness(password, consecutive_length=3):
    """
    Consecutiveness is the enemy of entropy, but makes it easier to remember.

    :param str password: password to evaluate
    :param int consecutive_length: length of the segment to be uniform to consider loss of entropy
    :return int: in range 0-1

    >>> Complexity.consecutiveness('password')
    1.0
    >>> Complexity.consecutiveness('PaSsWoRd')
    0.0
    >>> Complexity.consecutiveness('yio')
    0
    """
    consec = 0
    for i in range(len(password) - consecutive_length):
        if all([char.islower() for char in password[i:i + consecutive_length]]):
            consec += 1
        elif all([char.isupper() for char in password[i:i + consecutive_length]]):
            consec += 1
    try:
        return consec / (len(password) - consecutive_length)
    except ZeroDivisionError:
        return 0
Consecutiveness is the enemy of entropy, but makes it easier to remember. :param str password: password to evaluate :param int consecutive_length: length of the segment to be uniform to consider loss of entropy :return int: in range 0-1 >>> Complexity.consecutiveness('password') 1.0 >>> Complexity.consecutiveness('PaSsWoRd') 0.0 >>> Complexity.consecutiveness('yio') 0
def parse_root(raw):
    "Efficiently parses the root element of a *raw* XML document, returning a tuple of its qualified name and attribute dictionary."
    if sys.version < '3':
        fp = StringIO(raw)
    else:
        fp = BytesIO(raw.encode('UTF-8'))
    for event, element in etree.iterparse(fp, events=('start',)):
        return (element.tag, element.attrib)
Efficiently parses the root element of a *raw* XML document, returning a tuple of its qualified name and attribute dictionary.
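A brief usage sketch, assuming the module imported `etree` (e.g. `xml.etree.ElementTree`), `StringIO`/`BytesIO`, and `sys` as the function requires:

# Hypothetical usage; the imports above are assumptions of the snippet.
tag, attrib = parse_root('<feed xmlns="http://www.w3.org/2005/Atom" lang="en"/>')
print(tag)     # {http://www.w3.org/2005/Atom}feed
print(attrib)  # {'lang': 'en'}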
def taxids(self):
    """Distinct NCBI taxonomy identifiers (``taxid``) in :class:`.models.Entry`

    :return: NCBI taxonomy identifiers
    :rtype: list[int]
    """
    r = self.session.query(distinct(models.Entry.taxid)).all()
    return [x[0] for x in r]
Distinct NCBI taxonomy identifiers (``taxid``) in :class:`.models.Entry` :return: NCBI taxonomy identifiers :rtype: list[int]
def typed_encode(self, r):
    """
    :param r: record expecting id and value properties
    :return: dict with id and json properties
    """
    try:
        value = r.get('value')
        if "json" in r:
            value = json2value(r["json"])
        elif is_data(value) or value is not None:
            pass
        else:
            from mo_logs import Log
            raise Log.error("Expecting every record given to have \"value\" or \"json\" property")

        _buffer = UnicodeBuilder(1024)
        net_new_properties = []
        path = []
        if is_data(value):
            given_id = self.get_id(value)
            value['_id'] = None
            version = self.get_version(value)
        else:
            given_id = None
            version = None

        if given_id:
            record_id = r.get('id')
            if record_id and record_id != given_id:
                from mo_logs import Log
                raise Log.error(
                    "expecting {{property}} of record ({{record_id|quote}}) to match one given ({{given|quote}})",
                    property=self.id_info,
                    record_id=record_id,
                    given=given_id
                )
        else:
            record_id = r.get('id')
            if record_id:
                given_id = record_id
            else:
                given_id = random_id()

        typed_encode(value, self.schema, path, net_new_properties, _buffer)
        json = _buffer.build()

        return given_id, version, json
    except Exception as e:
        # THE PRETTY JSON WILL PROVIDE MORE DETAIL ABOUT THE SERIALIZATION CONCERNS
        from mo_logs import Log
        Log.error("Serialization of JSON problems", cause=e)
:param r: record expecting id and value properties :return: dict with id and json properties
def exception_handler(job, *exc_info): """ Called by RQ when there is a failure in a worker. NOTE: Make sure that in your RQ worker process, rollbar.init() has been called with handler='blocking'. The default handler, 'thread', does not work from inside an RQ worker. """ # Report data about the job with the exception. job_info = job.to_dict() # job_info['data'] is the pickled representation of the job, and doesn't json-serialize well. # repr() works nicely. job_info['data'] = repr(job_info['data']) extra_data = {'job': job_info} payload_data = {'framework': 'rq'} rollbar.report_exc_info(exc_info, extra_data=extra_data, payload_data=payload_data) # continue to the next handler return True
Called by RQ when there is a failure in a worker. NOTE: Make sure that in your RQ worker process, rollbar.init() has been called with handler='blocking'. The default handler, 'thread', does not work from inside an RQ worker.
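A minimal sketch of wiring this handler into an RQ worker; the queue name and access token are placeholders:

import rollbar
from redis import Redis
from rq import Queue, Worker

rollbar.init('POST_SERVER_ITEM_ACCESS_TOKEN', handler='blocking')

conn = Redis()
worker = Worker([Queue('default', connection=conn)], connection=conn,
                exception_handlers=[exception_handler])
worker.work()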
def validate(nanopub: dict, error_level: str = "WARNING") -> Tuple[str, str, str]: """Validate Nanopub Error Levels are similar to log levels - selecting WARNING includes both WARNING and ERROR, selecting ERROR just includes ERROR The validation result is a list of objects containing { 'level': 'Warning|Error', 'section': 'Assertion|Annotation|Structure', 'label': '{Error|Warning}-{Assertion|Annotation|Structure}', # to be used for faceting in Elasticsearch 'index': idx, # Index of Assertion or Annotation in Nanopub - starts at 0 'msg': msg, # Error or Warning message } Args: nanopub: nanopub record starting with nanopub... level: return WARNING or just ERROR? defaults to warnings and errors Returns: list(tuples): [{'level': 'Warning', 'section': 'Assertion', 'label': 'Warning-Assertion', 'index': 0, 'msg': <msg>}] """ # Validation results v = [] bel_version = config["bel"]["lang"]["default_bel_version"] # Structural checks try: if not isinstance(nanopub["nanopub"]["assertions"], list): msg = "Assertions must be a list/array" v.append( { "level": "Error", "section": "Structure", "label": "Error-Structure", "msg": msg, "msg_html": msg, } ) except Exception as e: msg = 'Missing nanopub["nanopub"]["assertions"]' v.append( { "level": "Error", "section": "Structure", "label": "Error-Structure", "msg": msg, "msg_html": msg, } ) try: if ( "name" in nanopub["nanopub"]["type"] and "version" in nanopub["nanopub"]["type"] ): pass if nanopub["nanopub"]["type"]["name"].upper() == "BEL": bel_version = nanopub["nanopub"]["type"]["version"] except Exception as e: msg = 'Missing or badly formed type - must have nanopub["nanopub"]["type"] = {"name": <name>, "version": <version}' v.append( { "level": "Error", "section": "Structure", "label": "Error-Structure", "msg": msg, "msg_html": msg, } ) try: for key in ["uri", "database", "reference"]: if key in nanopub["nanopub"]["citation"]: break else: msg = 'nanopub["nanopub"]["citation"] must have either a uri, database or reference key.' v.append( { "level": "Error", "section": "Structure", "label": "Error-Structure", "msg": msg, "msg_html": msg, } ) except Exception as e: msg = 'nanopub["nanopub"] must have a "citation" key with either a uri, database or reference key.' 
v.append( { "level": "Error", "section": "Structure", "label": "Error-Structure", "msg": msg, "msg_html": msg, } ) # Assertion checks if "assertions" in nanopub["nanopub"]: for idx, assertion in enumerate(nanopub["nanopub"]["assertions"]): bo = bel.lang.belobj.BEL( bel_version, config["bel_api"]["servers"]["api_url"] ) belstr = f'{assertion.get("subject")} {assertion.get("relation", "")} {assertion.get("object", "")}' belstr = belstr.replace("None", "") try: messages = ( bo.parse(belstr) .semantic_validation(error_level=error_level) .validation_messages ) for message in messages: (level, msg) = message if error_level == "ERROR": if level == "ERROR": v.append( { "level": f"{level.title()}", "section": "Assertion", "label": f"{level.title()}-Assertion", "index": idx, "msg": msg, "msg_html": convert_msg_to_html(msg), } ) else: v.append( { "level": f"{level.title()}", "section": "Assertion", "label": f"{level.title()}-Assertion", "index": idx, "msg": msg, "msg_html": convert_msg_to_html(msg), } ) except Exception as e: msg = f"Could not parse: {belstr}" v.append( { "level": "Error", "section": "Assertion", "label": "Error-Assertion", "index": idx, "msg": msg, "msg_html": msg, } ) log.exception(f"Could not parse: {belstr}") # Annotation checks if error_level == "WARNING": for idx, annotation in enumerate(nanopub["nanopub"].get("annotations", [])): term_type = annotation["type"] term_id = annotation["id"] # term_label = annotation['label'] log.info(f"Annotation: {term_type} ID: {term_id}") search_body = { "_source": ["src_id", "id", "name", "label", "annotation_types"], "query": {"term": {"id": term_id}}, } results = es.search(index="terms", doc_type="term", body=search_body) if len(results["hits"]["hits"]) > 0: result = results["hits"]["hits"][0]["_source"] if term_type not in result["annotation_types"]: msg = f'Annotation type: {term_type} for {term_id} does not match annotation types in database: {result["annotation_types"]}' v.append( { "level": "Warning", "section": "Annotation", "index": idx, "label": "Warning-Annotation", "msg": msg, "msg_html": msg, } ) else: msg = f"Annotation term: {term_id} not found in database" v.append( { "level": "Warning", "section": "Annotation", "index": idx, "label": "Warning-Annotation", "msg": msg, "msg_html": msg, } ) return v
Validate Nanopub Error Levels are similar to log levels - selecting WARNING includes both WARNING and ERROR, selecting ERROR just includes ERROR The validation result is a list of objects containing { 'level': 'Warning|Error', 'section': 'Assertion|Annotation|Structure', 'label': '{Error|Warning}-{Assertion|Annotation|Structure}', # to be used for faceting in Elasticsearch 'index': idx, # Index of Assertion or Annotation in Nanopub - starts at 0 'msg': msg, # Error or Warning message } Args: nanopub: nanopub record starting with nanopub... error_level: return WARNING or just ERROR? defaults to warnings and errors Returns: list of dicts: [{'level': 'Warning', 'section': 'Assertion', 'label': 'Warning-Assertion', 'index': 0, 'msg': <msg>}]
def createKeyboardTab(self): ''' KEYBOARD ''' _keyboardList = [ 'KEYCODE_1', 'KEYCODE_2', 'KEYCODE_3', 'KEYCODE_4', 'KEYCODE_5', 'KEYCODE_6', 'KEYCODE_7', 'KEYCODE_8', 'KEYCODE_9', 'KEYCODE_0', 'KEYCODE_Q', 'KEYCODE_W', 'KEYCODE_E', 'KEYCODE_R', 'KEYCODE_T', 'KEYCODE_Y', 'KEYCODE_U', 'KEYCODE_I', 'KEYCODE_O', 'KEYCODE_P', 'KEYCODE_A', 'KEYCODE_S', 'KEYCODE_D', 'KEYCODE_F', 'KEYCODE_G', 'KEYCODE_H', 'KEYCODE_J', 'KEYCODE_K', 'KEYCODE_L', 'KEYCODE_DEL', 'KEYCODE_Z', 'KEYCODE_X', 'KEYCODE_C', 'KEYCODE_V', 'KEYCODE_B', 'KEYCODE_N', 'KEYCODE_M', 'KEYCODE_.', 'KEYCODE_SPACE', 'KEYCODE_GO' ] for keyboard in _keyboardList: _cpb = ControlPanelButton(self.keyboardTab, self.culebron, self.printOperation, value=keyboard, text=keyboard[8:], width=Layout.BUTTON_WIDTH, bg=self.bg, fg=self.fg, highlightbackground=self.highlightbackground) _cpb.configure(command=_cpb.command) _cpb.grid(column=self.childWindow.column, row=self.childWindow.row) self.tabLayout()
KEYBOARD
def stage_http_response2(self, payload): """Log complete http response, including response1 and payload""" # required because http code uses sending all None to reset # parameters. We ignore that if not self._http_response_version and not payload: return if self.enabled and self.http_detail_level is not None and \ self.httplogger.isEnabledFor(logging.DEBUG): if self._http_response_headers: header_str = \ ' '.join('{0}:{1!r}'.format(k, v) for k, v in self._http_response_headers.items()) else: header_str = '' if self.http_detail_level == 'summary': upayload = "" elif self.http_maxlen and (len(payload) > self.http_maxlen): upayload = (_ensure_unicode(payload[:self.http_maxlen]) + '...') else: upayload = _ensure_unicode(payload) self.httplogger.debug('Response:%s %s:%s %s %s\n %s', self._http_response_conn_id, self._http_response_status, self._http_response_reason, self._http_response_version, header_str, upayload)
Log complete http response, including response1 and payload
def copy_format(self): """Copies the format of the selected cells to the Clipboard Cells are shifted so that the top left bbox corner is at 0,0 """ row, col, tab = self.grid.actions.cursor code_array = self.grid.code_array # Cell attributes new_cell_attributes = [] selection = self.get_selection() if not selection: # Current cell is chosen for selection selection = Selection([], [], [], [], [(row, col)]) # Format content is shifted so that the top left corner is 0,0 ((top, left), (bottom, right)) = \ selection.get_grid_bbox(self.grid.code_array.shape) cell_attributes = code_array.cell_attributes for __selection, table, attrs in cell_attributes: if tab == table: new_selection = selection & __selection if new_selection: new_shifted_selection = new_selection.shifted(-top, -left) if "merge_area" not in attrs: selection_params = new_shifted_selection.parameters cellattribute = selection_params, table, attrs new_cell_attributes.append(cellattribute) # Rows shifted_new_row_heights = {} for row, table in code_array.row_heights: if tab == table and top <= row <= bottom: shifted_new_row_heights[row-top, table] = \ code_array.row_heights[row, table] # Columns shifted_new_col_widths = {} for col, table in code_array.col_widths: if tab == table and left <= col <= right: shifted_new_col_widths[col-left, table] = \ code_array.col_widths[col, table] format_data = { "cell_attributes": new_cell_attributes, "row_heights": shifted_new_row_heights, "col_widths": shifted_new_col_widths, } attr_string = repr(format_data) self.grid.main_window.clipboard.set_clipboard(attr_string)
Copies the format of the selected cells to the Clipboard Cells are shifted so that the top left bbox corner is at 0,0
def handle_fail_rcs(self, req):
    """
    Bail out if we get a 401 and leave a message
    """
    try:
        logger.debug("HTTP Status Code: %s", req.status_code)
        logger.debug("HTTP Response Text: %s", req.text)
        logger.debug("HTTP Response Reason: %s", req.reason)
        logger.debug("HTTP Response Content: %s", req.content)
    except Exception:
        logger.error("Malformed HTTP Request.")

    # attempt to read the HTTP response JSON message
    try:
        logger.debug("HTTP Response Message: %s", req.json()["message"])
    except Exception:
        logger.debug("No HTTP Response message present.")

    # handle specific status codes
    if req.status_code >= 400:
        logger.info("Debug Information:\nHTTP Status Code: %s",
                    req.status_code)
        logger.info("HTTP Status Text: %s", req.reason)
        if req.status_code == 401:
            logger.error("Authorization Required.")
            logger.error("Please ensure correct credentials "
                         "in " + constants.default_conf_file)
            logger.debug("HTTP Response Text: %s", req.text)
        if req.status_code == 402:
            # failed registration because of entitlement limit hit
            logger.debug('Registration failed by 402 error.')
            try:
                logger.error(req.json()["message"])
            except Exception:
                # covers LookupError and malformed JSON alike
                logger.error("Got 402 but no message")
                logger.debug("HTTP Response Text: %s", req.text)
        if req.status_code == 403 and self.auto_config:
            # Insights disabled in satellite
            rhsm_hostname = urlparse(self.base_url).hostname
            if (rhsm_hostname != 'subscription.rhn.redhat.com' and
               rhsm_hostname != 'subscription.rhsm.redhat.com'):
                logger.error('Please enable Insights on Satellite server '
                             '%s to continue.', rhsm_hostname)
        if req.status_code == 412:
            try:
                unreg_date = req.json()["unregistered_at"]
                logger.error(req.json()["message"])
                write_unregistered_file(unreg_date)
            except Exception:
                # covers LookupError and malformed JSON alike
                unreg_date = "412, but no unreg_date or message"
                logger.debug("HTTP Response Text: %s", req.text)
        return True
    return False
Bail out if we get a 401 and leave a message
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'document_retrieval_strategy' ) and self.document_retrieval_strategy is not None: _dict[ 'document_retrieval_strategy'] = self.document_retrieval_strategy return _dict
Return a json dictionary representing this model.
def create(self, message, mid=None, age=60, force=True):
    """
    Create a session.

    force
        if you pass `force=False`, it may raise SessionError due to a
        duplicate message id
    """
    with self.session_lock:
        if not hasattr(message, "id"):
            message.__setattr__("id", "event-%s" % (uuid.uuid4().hex,))
        if self.session_list.get(message.id, None) is not None:
            if force is False:
                raise SessionError("Message id: %s duplicate!" % message.id)
            else:
                message = Message(message.to_dict(), generate_id=True)
        session = {
            "status": Status.CREATED,
            "message": message,
            "age": age,
            "mid": mid,
            "created_at": time(),
            "is_published": Event(),
            "is_resolved": Event()
        }
        self.session_list.update({
            message.id: session
        })
        return session
Create a session. force: if you pass `force=False`, it may raise SessionError due to a duplicate message id
def restore_gc_state(): """ Restore the garbage collector state on leaving the with block. """ old_isenabled = gc.isenabled() old_flags = gc.get_debug() try: yield finally: gc.set_debug(old_flags) (gc.enable if old_isenabled else gc.disable)()
Restore the garbage collector state on leaving the with block.
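The yield-based structure implies this generator is meant to be wrapped with contextlib.contextmanager; a minimal usage sketch under that assumption:

import gc
from contextlib import contextmanager

restore_gc_state = contextmanager(restore_gc_state)

with restore_gc_state():
    gc.disable()                  # temporarily switch the collector off
    gc.set_debug(gc.DEBUG_STATS)  # and turn on debug output
# both the enabled flag and the debug flags are restored here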
def _get_axis_data(self, bunch, dim, cluster_id=None, load_all=None):
    """Extract the points from the data on a given dimension.

    bunch is returned by the features() function.
    dim is the string specifying the dimensions to extract for the data.

    """
    if dim in self.attributes:
        return self.attributes[dim](cluster_id, load_all=load_all)
    masks = bunch.get('masks', None)
    assert dim not in self.attributes
    # This is called only on PC data.
    s = 'ABCDEFGHIJ'
    # Channel relative index.
    c_rel = int(dim[:-1])
    # Get the channel_id from the currently-selected channels.
    channel_id = self.channel_ids[c_rel % len(self.channel_ids)]
    # Skip the plot if the channel id is not displayed.
    if channel_id not in bunch.channel_ids:  # pragma: no cover
        return None
    # Get the column index of the current channel in data.
    c = list(bunch.channel_ids).index(channel_id)
    # Principal component: A=0, B=1, etc.
    d = s.index(dim[-1])
    if masks is not None:
        masks = masks[:, c]
    return Bunch(data=bunch.data[:, c, d],
                 masks=masks,
                 )
Extract the points from the data on a given dimension. bunch is returned by the features() function. dim is the string specifying the dimensions to extract for the data.
def distb(self, tb=None, file=None): """Disassemble a traceback (default: last traceback).""" if tb is None: try: tb = sys.last_traceback except AttributeError: raise RuntimeError("no last traceback to disassemble") while tb.tb_next: tb = tb.tb_next self.disassemble(tb.tb_frame.f_code, tb.tb_lasti, file=file)
Disassemble a traceback (default: last traceback).
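The standard library exposes the same interface as a module-level function, dis.distb; a quick sketch disassembling the frame where an exception was raised:

import dis
import sys

try:
    1 / 0
except ZeroDivisionError:
    dis.distb(sys.exc_info()[2])  # prints the bytecode around the failing instruction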
def plot_magseries(times, mags, magsarefluxes=False, errs=None, out=None, sigclip=30.0, normto='globalmedian', normmingap=4.0, timebin=None, yrange=None, segmentmingap=100.0, plotdpi=100): '''This plots a magnitude/flux time-series. Parameters ---------- times,mags : np.array The mag/flux time-series to plot as a function of time. magsarefluxes : bool Indicates if the input `mags` array is actually an array of flux measurements instead of magnitude measurements. If this is set to True, then the plot y-axis will be set as appropriate for mag or fluxes. In addition: - if `normto` is 'zero', then the median flux is divided from each observation's flux value to yield normalized fluxes with 1.0 as the global median. - if `normto` is 'globalmedian', then the global median flux value across the entire time series is multiplied with each measurement. - if `norm` is set to a `float`, then this number is multiplied with the flux value for each measurement. errs : np.array or None If this is provided, contains the measurement errors associated with each measurement of flux/mag in time-series. Providing this kwarg will add errbars to the output plot. out : str or StringIO/BytesIO object or None Sets the output type and target: - If `out` is a string, will save the plot to the specified file name. - If `out` is a StringIO/BytesIO object, will save the plot to that file handle. This can be useful to carry out additional operations on the output binary stream, or convert it to base64 text for embedding in HTML pages. - If `out` is None, will save the plot to a file called 'magseries-plot.png' in the current working directory. sigclip : float or int or sequence of two floats/ints or None If a single float or int, a symmetric sigma-clip will be performed using the number provided as the sigma-multiplier to cut out from the input time-series. If a list of two ints/floats is provided, the function will perform an 'asymmetric' sigma-clip. The first element in this list is the sigma value to use for fainter flux/mag values; the second element in this list is the sigma value to use for brighter flux/mag values. For example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma dimmings and greater than 3-sigma brightenings. Here the meaning of "dimming" and "brightening" is set by *physics* (not the magnitude system), which is why the `magsarefluxes` kwarg must be correctly set. If `sigclip` is None, no sigma-clipping will be performed, and the time-series (with non-finite elems removed) will be passed through to the output. normto : {'globalmedian', 'zero'} or a float Sets the normalization target:: 'globalmedian' -> norms each mag to the global median of the LC column 'zero' -> norms each mag to zero a float -> norms each mag to this specified float value. normmingap : float This defines how much the difference between consecutive measurements is allowed to be to consider them as parts of different timegroups. By default it is set to 4.0 days. timebin : float or None The bin size to use to group together measurements closer than this amount in time. This is in seconds. If this is None, no time-binning will be performed. yrange : list of two floats or None This is used to provide a custom y-axis range to the plot. If None, will automatically determine y-axis range. segmentmingap : float or None This controls the minimum length of time (in days) required to consider a timegroup in the light curve as a separate segment. 
This is useful when the light curve consists of measurements taken over several seasons, so there's lots of dead space in the plot that can be cut out to zoom in on the interesting stuff. If `segmentmingap` is not None, the magseries plot will be cut in this way and the x-axis will show these breaks. plotdpi : int Sets the resolution in DPI for PNG plots (default = 100). Returns ------- str or BytesIO/StringIO object Returns based on the input: - If `out` is a str or None, the path to the generated plot file is returned. - If `out` is a StringIO/BytesIO object, will return the StringIO/BytesIO object to which the plot was written. ''' # sigclip the magnitude timeseries stimes, smags, serrs = sigclip_magseries(times, mags, errs, magsarefluxes=magsarefluxes, sigclip=sigclip) # now we proceed to binning if timebin and errs is not None: binned = time_bin_magseries_with_errs(stimes, smags, serrs, binsize=timebin) btimes, bmags, berrs = (binned['binnedtimes'], binned['binnedmags'], binned['binnederrs']) elif timebin and errs is None: binned = time_bin_magseries(stimes, smags, binsize=timebin) btimes, bmags, berrs = binned['binnedtimes'], binned['binnedmags'], None else: btimes, bmags, berrs = stimes, smags, serrs # check if we need to normalize if normto is not False: btimes, bmags = normalize_magseries(btimes, bmags, normto=normto, magsarefluxes=magsarefluxes, mingap=normmingap) btimeorigin = btimes.min() btimes = btimes - btimeorigin ################################## ## FINALLY PLOT THE LIGHT CURVE ## ################################## # if we're going to plot with segment gaps highlighted, then find the gaps if segmentmingap is not None: ntimegroups, timegroups = find_lc_timegroups(btimes, mingap=segmentmingap) # get the yrange for all the plots if it's given if yrange and isinstance(yrange,(list,tuple)) and len(yrange) == 2: ymin, ymax = yrange # if it's not given, figure it out else: # the plot y limits are just 0.05 mags on each side if mags are used if not magsarefluxes: ymin, ymax = (bmags.min() - 0.05, bmags.max() + 0.05) # if we're dealing with fluxes, limits are 2% of the flux range per side else: ycov = bmags.max() - bmags.min() ymin = bmags.min() - 0.02*ycov ymax = bmags.max() + 0.02*ycov # if we're supposed to make the plot segment-aware (i.e. 
gaps longer than # segmentmingap will be cut out) if segmentmingap and ntimegroups > 1: LOGINFO('%s time groups found' % ntimegroups) # our figure is now a multiple axis plot # the aspect ratio is a bit wider fig, axes = plt.subplots(1,ntimegroups,sharey=True) fig.set_size_inches(10,4.8) axes = np.ravel(axes) # now go through each axis and make the plots for each timegroup for timegroup, ax, axind in zip(timegroups, axes, range(len(axes))): tgtimes = btimes[timegroup] tgmags = bmags[timegroup] if berrs: tgerrs = berrs[timegroup] else: tgerrs = None LOGINFO('axes: %s, timegroup %s: JD %.3f to %.3f' % ( axind, axind+1, btimeorigin + tgtimes.min(), btimeorigin + tgtimes.max()) ) ax.errorbar(tgtimes, tgmags, fmt='go', yerr=tgerrs, markersize=2.0, markeredgewidth=0.0, ecolor='grey', capsize=0) # don't use offsets on any xaxis ax.get_xaxis().get_major_formatter().set_useOffset(False) # fix the ticks to use no yoffsets and remove right spines for first # axes instance if axind == 0: ax.get_yaxis().get_major_formatter().set_useOffset(False) ax.spines['right'].set_visible(False) ax.yaxis.tick_left() # remove the right and left spines for the other axes instances elif 0 < axind < (len(axes)-1): ax.spines['right'].set_visible(False) ax.spines['left'].set_visible(False) ax.tick_params(right='off', labelright='off', left='off',labelleft='off') # make the left spines invisible for the last axes instance elif axind == (len(axes)-1): ax.spines['left'].set_visible(False) ax.spines['right'].set_visible(True) ax.yaxis.tick_right() # set the yaxis limits if not magsarefluxes: ax.set_ylim(ymax, ymin) else: ax.set_ylim(ymin, ymax) # now figure out the xaxis ticklabels and ranges tgrange = tgtimes.max() - tgtimes.min() if tgrange < 10.0: ticklocations = [tgrange/2.0] ax.set_xlim(npmin(tgtimes) - 0.5, npmax(tgtimes) + 0.5) elif 10.0 < tgrange < 30.0: ticklocations = np.linspace(tgtimes.min()+5.0, tgtimes.max()-5.0, num=2) ax.set_xlim(npmin(tgtimes) - 2.0, npmax(tgtimes) + 2.0) elif 30.0 < tgrange < 100.0: ticklocations = np.linspace(tgtimes.min()+10.0, tgtimes.max()-10.0, num=3) ax.set_xlim(npmin(tgtimes) - 2.5, npmax(tgtimes) + 2.5) else: ticklocations = np.linspace(tgtimes.min()+20.0, tgtimes.max()-20.0, num=3) ax.set_xlim(npmin(tgtimes) - 3.0, npmax(tgtimes) + 3.0) ax.xaxis.set_ticks([int(x) for x in ticklocations]) # done with plotting all the sub axes # make the distance between sub plots smaller plt.subplots_adjust(wspace=0.07) # make the overall x and y labels fig.text(0.5, 0.00, 'JD - %.3f (not showing gaps > %.2f d)' % (btimeorigin, segmentmingap), ha='center') if not magsarefluxes: fig.text(0.02, 0.5, 'magnitude', va='center', rotation='vertical') else: fig.text(0.02, 0.5, 'flux', va='center', rotation='vertical') # make normal figure otherwise else: fig = plt.figure() fig.set_size_inches(7.5,4.8) plt.errorbar(btimes, bmags, fmt='go', yerr=berrs, markersize=2.0, markeredgewidth=0.0, ecolor='grey', capsize=0) # make a grid plt.grid(color='#a9a9a9', alpha=0.9, zorder=0, linewidth=1.0, linestyle=':') # fix the ticks to use no offsets plt.gca().get_yaxis().get_major_formatter().set_useOffset(False) plt.gca().get_xaxis().get_major_formatter().set_useOffset(False) plt.xlabel('JD - %.3f' % btimeorigin) # set the yaxis limits and labels if not magsarefluxes: plt.ylim(ymax, ymin) plt.ylabel('magnitude') else: plt.ylim(ymin, ymax) plt.ylabel('flux') # check if the output filename is actually an instance of StringIO if sys.version_info[:2] < (3,0): is_Strio = isinstance(out, cStringIO.InputType) else: is_Strio = 
isinstance(out, Strio) # write the plot out to a file if requested if out and not is_Strio: if out.endswith('.png'): plt.savefig(out,bbox_inches='tight',dpi=plotdpi) else: plt.savefig(out,bbox_inches='tight') plt.close() return os.path.abspath(out) elif out and is_Strio: plt.savefig(out, bbox_inches='tight', dpi=plotdpi, format='png') return out elif not out and dispok: plt.show() plt.close() return else: LOGWARNING('no output file specified and no $DISPLAY set, ' 'saving to magseries-plot.png in current directory') outfile = 'magseries-plot.png' plt.savefig(outfile,bbox_inches='tight',dpi=plotdpi) plt.close() return os.path.abspath(outfile)
This plots a magnitude/flux time-series. Parameters ---------- times,mags : np.array The mag/flux time-series to plot as a function of time. magsarefluxes : bool Indicates if the input `mags` array is actually an array of flux measurements instead of magnitude measurements. If this is set to True, then the plot y-axis will be set as appropriate for mag or fluxes. In addition: - if `normto` is 'zero', then the median flux is divided from each observation's flux value to yield normalized fluxes with 1.0 as the global median. - if `normto` is 'globalmedian', then the global median flux value across the entire time series is multiplied with each measurement. - if `norm` is set to a `float`, then this number is multiplied with the flux value for each measurement. errs : np.array or None If this is provided, contains the measurement errors associated with each measurement of flux/mag in time-series. Providing this kwarg will add errbars to the output plot. out : str or StringIO/BytesIO object or None Sets the output type and target: - If `out` is a string, will save the plot to the specified file name. - If `out` is a StringIO/BytesIO object, will save the plot to that file handle. This can be useful to carry out additional operations on the output binary stream, or convert it to base64 text for embedding in HTML pages. - If `out` is None, will save the plot to a file called 'magseries-plot.png' in the current working directory. sigclip : float or int or sequence of two floats/ints or None If a single float or int, a symmetric sigma-clip will be performed using the number provided as the sigma-multiplier to cut out from the input time-series. If a list of two ints/floats is provided, the function will perform an 'asymmetric' sigma-clip. The first element in this list is the sigma value to use for fainter flux/mag values; the second element in this list is the sigma value to use for brighter flux/mag values. For example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma dimmings and greater than 3-sigma brightenings. Here the meaning of "dimming" and "brightening" is set by *physics* (not the magnitude system), which is why the `magsarefluxes` kwarg must be correctly set. If `sigclip` is None, no sigma-clipping will be performed, and the time-series (with non-finite elems removed) will be passed through to the output. normto : {'globalmedian', 'zero'} or a float Sets the normalization target:: 'globalmedian' -> norms each mag to the global median of the LC column 'zero' -> norms each mag to zero a float -> norms each mag to this specified float value. normmingap : float This defines how much the difference between consecutive measurements is allowed to be to consider them as parts of different timegroups. By default it is set to 4.0 days. timebin : float or None The bin size to use to group together measurements closer than this amount in time. This is in seconds. If this is None, no time-binning will be performed. yrange : list of two floats or None This is used to provide a custom y-axis range to the plot. If None, will automatically determine y-axis range. segmentmingap : float or None This controls the minimum length of time (in days) required to consider a timegroup in the light curve as a separate segment. This is useful when the light curve consists of measurements taken over several seasons, so there's lots of dead space in the plot that can be cut out to zoom in on the interesting stuff. 
If `segmentmingap` is not None, the magseries plot will be cut in this way and the x-axis will show these breaks. plotdpi : int Sets the resolution in DPI for PNG plots (default = 100). Returns ------- str or BytesIO/StringIO object Returns based on the input: - If `out` is a str or None, the path to the generated plot file is returned. - If `out` is a StringIO/BytesIO object, will return the StringIO/BytesIO object to which the plot was written.
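A minimal usage sketch with synthetic data; the output file name and keyword values here are illustrative only:

import numpy as np

times = np.sort(58000.0 + 30.0 * np.random.rand(500))  # JD over ~30 days
mags = 12.0 + 0.05 * np.random.randn(500)
errs = np.full_like(mags, 0.05)

outfile = plot_magseries(times, mags, errs=errs, sigclip=5.0,
                         out='lightcurve.png', plotdpi=120)
print(outfile)  # absolute path to the saved PNG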
def frequency_psd_from_qd(self, tau0=1.0): """ return frequency power spectral density coefficient h_a for the noise type defined by (qd, b, tau0) Colored noise generated with (qd, b, tau0) parameters will show a frequency power spectral density of S_y(f) = Frequency_PSD(f) = h_a * f^a where the slope a comes from the phase PSD slope b: a = b + 2 Kasdin & Walter eqn (39) """ a = self.b + 2.0 return self.qd*2.0*pow(2.0*np.pi, a)*pow(tau0, a-1.0)
return frequency power spectral density coefficient h_a for the noise type defined by (qd, b, tau0) Colored noise generated with (qd, b, tau0) parameters will show a frequency power spectral density of S_y(f) = Frequency_PSD(f) = h_a * f^a where the slope a comes from the phase PSD slope b: a = b + 2 Kasdin & Walter eqn (39)
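A worked sketch of the relation above for white frequency noise (phase PSD slope b = -2, hence a = 0): the (2*pi)^a factor drops out and h_0 reduces to 2*qd/tau0. The class scaffolding here is hypothetical; only qd, b, and the method body mirror the source:

import numpy as np

class ColoredNoise:
    def __init__(self, qd, b):
        self.qd = qd
        self.b = b

    def frequency_psd_from_qd(self, tau0=1.0):
        a = self.b + 2.0
        return self.qd * 2.0 * pow(2.0 * np.pi, a) * pow(tau0, a - 1.0)

n = ColoredNoise(qd=1e-22, b=-2)
print(n.frequency_psd_from_qd(tau0=1.0))  # 2e-22 == 2*qd/tau0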
def positionToIntensityUncertainty(image, sx, sy, kernelSize=None):
    '''
    calculates the estimated standard deviation map from the changes
    of neighbouring pixels from a center pixel within a point spread function
    defined by a std.dev. in x and y taken from the (sx, sy) maps

    sx,sy -> either 2d arrays of same shape as [image] or single values
    '''
    psf_is_const = not isinstance(sx, np.ndarray)
    if not psf_is_const:
        assert image.shape == sx.shape == sy.shape, \
            "Image and position uncertainty maps need to have same size"
        if kernelSize is None:
            kernelSize = _kSizeFromStd(max(sx.max(), sy.max()))
    else:
        assert type(sx) in (int, float) and type(sy) in (int, float), \
            "Position uncertainty values need to be int OR float"
        if kernelSize is None:
            kernelSize = _kSizeFromStd(max(sx, sy))
    if image.dtype.kind == 'u':
        image = image.astype(int)  # otherwise stack overflow through uint

    size = kernelSize // 2
    if size < 1:
        size = 1
        kernelSize = 1 + 2 * size
    # array to be filled by individual psf of every pixel:
    psf = np.zeros((kernelSize, kernelSize))
    # intensity uncertainty as stdev:
    sint = np.zeros(image.shape)

    if psf_is_const:
        _calc_constPSF(image, sint, sx, sy, psf, size)
    else:
        _calc_variPSF(image, sint, sx, sy, psf, size)
    return sint
calculates the estimated standard deviation map from the changes of neighbouring pixels from a center pixel within a point spread function defined by a std.dev. in x and y taken from the (sx, sy) maps sx,sy -> either 2d arrays of same shape as [image] or single values
def get_env_credential(env='dev'): """Get Account Credential from Spinnaker for *env*. Args: env (str): Environment name to find credentials for. Returns: dict: Complete credentials for *env*:: { 'accountId': '123098123', 'accountType': 'dev', 'assumeRole': 'role/spinnakerManaged', 'bastionEnabled': False, 'challengeDestructiveActions': False, 'cloudProvider': 'aws', 'defaultKeyPair': 'dev_access', 'discoveryEnabled': False, 'eddaEnabled': False, 'environment': 'dev', 'front50Enabled': False, 'name': 'dev', 'primaryAccount': False, 'provider': 'aws', 'regions': [ { 'availabilityZones': ['us-east-1b', 'us-east-1c', 'us-east-1d', 'us-east-1e'], 'deprecated': False, 'name': 'us-east-1', 'preferredZones': ['us-east-1b', 'us-east-1c', 'us-east-1d', 'us-east-1e' ] }, { 'availabilityZones': ['us-west-2a', 'us-west-2b', 'us-west-2c'], 'deprecated': False, 'name': 'us-west-2', 'preferredZones': ['us-west-2a', 'us-west-2b', 'us-west-2c'] } ], 'requiredGroupMembership': [], 'sessionName': 'Spinnaker', 'type': 'aws' } """ url = '/'.join([API_URL, 'credentials', env]) credential_response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT) assert credential_response.ok, 'Could not get credentials from Spinnaker.' credential = credential_response.json() LOG.debug('Credentials found:\n%s', credential) return credential
Get Account Credential from Spinnaker for *env*. Args: env (str): Environment name to find credentials for. Returns: dict: Complete credentials for *env*:: { 'accountId': '123098123', 'accountType': 'dev', 'assumeRole': 'role/spinnakerManaged', 'bastionEnabled': False, 'challengeDestructiveActions': False, 'cloudProvider': 'aws', 'defaultKeyPair': 'dev_access', 'discoveryEnabled': False, 'eddaEnabled': False, 'environment': 'dev', 'front50Enabled': False, 'name': 'dev', 'primaryAccount': False, 'provider': 'aws', 'regions': [ { 'availabilityZones': ['us-east-1b', 'us-east-1c', 'us-east-1d', 'us-east-1e'], 'deprecated': False, 'name': 'us-east-1', 'preferredZones': ['us-east-1b', 'us-east-1c', 'us-east-1d', 'us-east-1e' ] }, { 'availabilityZones': ['us-west-2a', 'us-west-2b', 'us-west-2c'], 'deprecated': False, 'name': 'us-west-2', 'preferredZones': ['us-west-2a', 'us-west-2b', 'us-west-2c'] } ], 'requiredGroupMembership': [], 'sessionName': 'Spinnaker', 'type': 'aws' }
def references(self, env, object_name, model, assoc_class, result_class_name, role, result_role, keys_only): """Instrument Associations. All four association-related operations (Associators, AssociatorNames, References, ReferenceNames) are mapped to this method. This method is a python generator Keyword arguments: env -- Provider Environment (pycimmb.ProviderEnvironment) object_name -- A pywbem.CIMInstanceName that defines the source CIM Object whose associated Objects are to be returned. model -- A template pywbem.CIMInstance to serve as a model of the objects to be returned. Only properties present on this model need to be set. assoc_class -- The pywbem.CIMClass. result_class_name -- If not empty, this string acts as a filter on the returned set of Instances by mandating that each returned Instances MUST represent an association between object_name and an Instance of a Class whose name matches this parameter or a subclass. role -- If not empty, MUST be a valid Property name. It acts as a filter on the returned set of Instances by mandating that each returned Instance MUST refer to object_name via a Property whose name matches the value of this parameter. result_role -- If not empty, MUST be a valid Property name. It acts as a filter on the returned set of Instances by mandating that each returned Instance MUST represent associations of object_name to other Instances, where the other Instances play the specified result_role in the association (i.e. the name of the Property in the Association Class that refers to the Object related to object_name MUST match the value of this parameter). keys_only -- A boolean. True if only the key properties should be set on the generated instances. The following diagram may be helpful in understanding the role, result_role, and result_class_name parameters. +------------------------+ +-------------------+ | object_name.classname | | result_class_name | | ~~~~~~~~~~~~~~~~~~~~~ | | ~~~~~~~~~~~~~~~~~ | +------------------------+ +-------------------+ | +-----------------------------------+ | | | [Association] assoc_class | | | object_name | ~~~~~~~~~~~~~~~~~~~~~~~~~ | | +--------------+ object_name.classname REF role | | (CIMInstanceName) | result_class_name REF result_role +------+ | |(CIMInstanceName) +-----------------------------------+ Possible Errors: CIM_ERR_ACCESS_DENIED CIM_ERR_NOT_SUPPORTED CIM_ERR_INVALID_NAMESPACE CIM_ERR_INVALID_PARAMETER (including missing, duplicate, unrecognized or otherwise incorrect parameters) CIM_ERR_FAILED (some other unspecified error occurred) """ pass
Instrument Associations. All four association-related operations (Associators, AssociatorNames, References, ReferenceNames) are mapped to this method. This method is a python generator Keyword arguments: env -- Provider Environment (pycimmb.ProviderEnvironment) object_name -- A pywbem.CIMInstanceName that defines the source CIM Object whose associated Objects are to be returned. model -- A template pywbem.CIMInstance to serve as a model of the objects to be returned. Only properties present on this model need to be set. assoc_class -- The pywbem.CIMClass. result_class_name -- If not empty, this string acts as a filter on the returned set of Instances by mandating that each returned Instances MUST represent an association between object_name and an Instance of a Class whose name matches this parameter or a subclass. role -- If not empty, MUST be a valid Property name. It acts as a filter on the returned set of Instances by mandating that each returned Instance MUST refer to object_name via a Property whose name matches the value of this parameter. result_role -- If not empty, MUST be a valid Property name. It acts as a filter on the returned set of Instances by mandating that each returned Instance MUST represent associations of object_name to other Instances, where the other Instances play the specified result_role in the association (i.e. the name of the Property in the Association Class that refers to the Object related to object_name MUST match the value of this parameter). keys_only -- A boolean. True if only the key properties should be set on the generated instances. The following diagram may be helpful in understanding the role, result_role, and result_class_name parameters. +------------------------+ +-------------------+ | object_name.classname | | result_class_name | | ~~~~~~~~~~~~~~~~~~~~~ | | ~~~~~~~~~~~~~~~~~ | +------------------------+ +-------------------+ | +-----------------------------------+ | | | [Association] assoc_class | | | object_name | ~~~~~~~~~~~~~~~~~~~~~~~~~ | | +--------------+ object_name.classname REF role | | (CIMInstanceName) | result_class_name REF result_role +------+ | |(CIMInstanceName) +-----------------------------------+ Possible Errors: CIM_ERR_ACCESS_DENIED CIM_ERR_NOT_SUPPORTED CIM_ERR_INVALID_NAMESPACE CIM_ERR_INVALID_PARAMETER (including missing, duplicate, unrecognized or otherwise incorrect parameters) CIM_ERR_FAILED (some other unspecified error occurred)
def nvm_primer(): '''Getting started with nvm (cf. https://github.com/creationix/nvm#usage). ''' print('\nDownload, compile and install the latest release of node:\n\n' + cyan(' nvm install node')) print('\nAnd then in any new shell:\n\n' + cyan(' nvm use node') + ' # use the installed version\n' + cyan(' nvm run node --version') + ' # run it\n' + cyan(' nvm exec 4.2 node --version') + ' # run any arbitrary ' 'command in a subshell with the desired version of node\n' + cyan(' nvm which 5.0') + ' # get the path to the executable to ' 'where it was installed') print('\nlist installed node versions:\n\n' + cyan(' nvm ls')) print('\nnvm usage:\n\n' + cyan(' nvm --help')) print('\n\n----\nBut at first, use a new shell or run `source ~/.bashrc`')
Getting started with nvm (cf. https://github.com/creationix/nvm#usage).
def _create_keywords_wizard_action(self): """Create action for keywords creation wizard.""" icon = resources_path('img', 'icons', 'show-keyword-wizard.svg') self.action_keywords_wizard = QAction( QIcon(icon), self.tr('Keywords Creation Wizard'), self.iface.mainWindow()) self.action_keywords_wizard.setStatusTip(self.tr( 'Open InaSAFE keywords creation wizard')) self.action_keywords_wizard.setWhatsThis(self.tr( 'Open InaSAFE keywords creation wizard')) self.action_keywords_wizard.setEnabled(False) self.action_keywords_wizard.triggered.connect( self.show_keywords_wizard) self.add_action(self.action_keywords_wizard, add_to_legend=True)
Create action for keywords creation wizard.
def clean_videos(self): """ Validates that all values in the video list are integer ids and removes all None values. """ if self.videos: self.videos = [int(v) for v in self.videos if v is not None and is_valid_digit(v)]
Validates that all values in the video list are integer ids and removes all None values.
def _sweep(self):
    """
    Sweeps the active measurements and checks their state: if an active
    measurement is now complete it is moved to the completed measurement
    set, if failed then to the failed set, and if failed and old it is
    evicted.
    :return:
    """
    while self.running:
        for am in list(self.activeMeasurements):
            now = datetime.datetime.utcnow()
            # devices were allocated and have completed == complete
            recordingDeviceCount = len(am.recordingDevices)
            if recordingDeviceCount > 0:
                if all(entry['state'] == RecordStatus.COMPLETE.name
                       for entry in am.recordingDevices.values()):
                    logger.info("Detected completed measurement " + am.id)
                    self._moveToComplete(am)
            # we have reached the end time and we have either all failed devices or no devices == kill
            if now > (am.endTime + datetime.timedelta(days=0, seconds=1)):
                allFailed = all(entry['state'] == RecordStatus.FAILED.name
                                for entry in am.recordingDevices.values())
                if (recordingDeviceCount > 0 and allFailed) or recordingDeviceCount == 0:
                    logger.warning("Detected failed measurement " + am.id +
                                   " with " + str(recordingDeviceCount) +
                                   " devices, allFailed: " + str(allFailed))
                    self._moveToFailed(am)
            # we are well past the end time and we have failed devices or an ongoing recording == kill or deathbed
            if now > (am.endTime + datetime.timedelta(days=0, seconds=self.maxTimeTilDeathbedSeconds)):
                if any(entry['state'] == RecordStatus.FAILED.name
                       for entry in am.recordingDevices.values()):
                    logger.warning("Detected failed and incomplete measurement " + am.id + ", assumed dead")
                    self._moveToFailed(am)
                elif all(entry['state'] == RecordStatus.RECORDING.name
                         for entry in am.recordingDevices.values()):
                    self._handleDeathbed(am)
        time.sleep(0.1)
    logger.warning("MeasurementCaretaker is now shutdown")
Sweeps the active measurements and checks their state: if an active measurement is now complete it is moved to the completed measurement set, if failed then to the failed set, and if failed and old it is evicted. :return:
def getFreeEnergyDifferences(self, compute_uncertainty=True, uncertainty_method=None, warning_cutoff=1.0e-10, return_theta=False): """Get the dimensionless free energy differences and uncertainties among all thermodynamic states. Parameters ---------- compute_uncertainty : bool, optional If False, the uncertainties will not be computed (default: True) uncertainty_method : string, optional Choice of method used to compute asymptotic covariance method, or None to use default. See help for computeAsymptoticCovarianceMatrix() for more information on various methods. (default: svd) warning_cutoff : float, optional Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10) return_theta : bool, optional Whether or not to return the theta matrix. Can be useful for complicated differences. Returns ------- Deltaf_ij :L np.ndarray, float, shape=(K, K) Deltaf_ij[i,j] is the estimated free energy difference dDeltaf_ij :L np.ndarray, float, shape=(K, K) dDeltaf_ij[i,j] is the estimated statistical uncertainty (one standard deviation) in Deltaf_ij[i,j] Notes ----- Computation of the covariance matrix may take some time for large K. The reported statistical uncertainty should, in the asymptotic limit, reflect one standard deviation for the normal distribution of the estimate. The true free energy difference should fall within the interval [-df, +df] centered on the estimate 68% of the time, and within the interval [-2 df, +2 df] centered on the estimate 95% of the time. This will break down in cases where the number of samples is not large enough to reach the asymptotic normal limit. See Section III of Reference [1]. Examples -------- >>> from pymbar import testsystems >>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn') >>> mbar = MBAR(u_kn, N_k) >>> [Deltaf_ij, dDeltaf_ij] = mbar.getFreeEnergyDifferences() """ # Compute free energy differences. f_i = np.matrix(self.f_k) Deltaf_ij = f_i - f_i.transpose() # zero out numerical error for thermodynamically identical states self._zerosamestates(Deltaf_ij) returns = [] returns.append(np.array(Deltaf_ij)) if compute_uncertainty or return_theta: # Compute asymptotic covariance matrix. Theta_ij = self._computeAsymptoticCovarianceMatrix( np.exp(self.Log_W_nk), self.N_k, method=uncertainty_method) if compute_uncertainty: # compute the covariance component without doing the double loop. # d2DeltaF = Theta_ij[i,i] + Theta_ij[j,j] - 2.0 * Theta_ij[i,j] diag = Theta_ij.diagonal() d2DeltaF = diag + diag.transpose() - 2 * Theta_ij # zero out numerical error for thermodynamically identical states self._zerosamestates(d2DeltaF) # check for any numbers below zero. if (np.any(d2DeltaF < 0.0)): if(np.any(d2DeltaF) < warning_cutoff): # Hmm. Will this print correctly? print("A squared uncertainty is negative. d2DeltaF = %e" % d2DeltaF[(np.any(d2DeltaF) < warning_cutoff)]) else: d2DeltaF[(np.any(d2DeltaF) < warning_cutoff)] = 0.0 # take the square root of the entries of the matrix dDeltaf_ij = np.sqrt(d2DeltaF) # Return matrix of free energy differences and uncertainties. returns.append(np.array(dDeltaf_ij)) if (return_theta): returns.append(np.array(Theta_ij)) return returns
Get the dimensionless free energy differences and uncertainties among all thermodynamic states. Parameters ---------- compute_uncertainty : bool, optional If False, the uncertainties will not be computed (default: True) uncertainty_method : string, optional Choice of method used to compute asymptotic covariance method, or None to use default. See help for computeAsymptoticCovarianceMatrix() for more information on various methods. (default: svd) warning_cutoff : float, optional Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10) return_theta : bool, optional Whether or not to return the theta matrix. Can be useful for complicated differences. Returns ------- Deltaf_ij : np.ndarray, float, shape=(K, K) Deltaf_ij[i,j] is the estimated free energy difference dDeltaf_ij : np.ndarray, float, shape=(K, K) dDeltaf_ij[i,j] is the estimated statistical uncertainty (one standard deviation) in Deltaf_ij[i,j] Notes ----- Computation of the covariance matrix may take some time for large K. The reported statistical uncertainty should, in the asymptotic limit, reflect one standard deviation for the normal distribution of the estimate. The true free energy difference should fall within the interval [-df, +df] centered on the estimate 68% of the time, and within the interval [-2 df, +2 df] centered on the estimate 95% of the time. This will break down in cases where the number of samples is not large enough to reach the asymptotic normal limit. See Section III of Reference [1]. Examples -------- >>> from pymbar import testsystems >>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn') >>> mbar = MBAR(u_kn, N_k) >>> [Deltaf_ij, dDeltaf_ij] = mbar.getFreeEnergyDifferences()
def emit(self, record): """ Publish record to redis logging list """ try: if self.max_messages: p = self.redis_client.pipeline() p.rpush(self.key, self.format(record)) p.ltrim(self.key, -self.max_messages, -1) p.execute() else: self.redis_client.rpush(self.key, self.format(record)) except redis.RedisError: pass
Publish record to redis logging list
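A minimal sketch of attaching the handler to a logger; the class name RedisListHandler and its constructor arguments are assumptions here, not the source's API:

import logging
import redis

handler = RedisListHandler(redis_client=redis.Redis(), key='app:logs',
                           max_messages=1000)  # hypothetical constructor
log = logging.getLogger('app')
log.addHandler(handler)
log.warning('disk usage at 91%')  # RPUSHed onto the capped Redis list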
def monkey_patch():
    """
    Monkey patches `zmq.Context` and `zmq.Socket`, replacing them (along
    with `zmq.Poller`) with their green versions.
    """
    ozmq = __import__('zmq')
    ozmq.Socket = zmq.Socket
    ozmq.Context = zmq.Context
    ozmq.Poller = zmq.Poller
    ioloop = __import__('zmq.eventloop.ioloop')
    ioloop.Poller = zmq.Poller
Monkey patches `zmq.Context` and `zmq.Socket`, replacing them (along with `zmq.Poller`) with their green versions.
def advance_job_status(namespace: str, job: Job, duration: float, err: Optional[Exception]): """Advance the status of a job depending on its execution. This function is called after a job has been executed. It calculates its next status and calls the appropriate signals. """ duration = human_duration(duration) if not err: job.status = JobStatus.SUCCEEDED logger.info('Finished execution of %s in %s', job, duration) return if job.should_retry: job.status = JobStatus.NOT_SET job.retries += 1 if isinstance(err, RetryException) and err.at is not None: job.at = err.at else: job.at = (datetime.now(timezone.utc) + exponential_backoff(job.retries)) signals.job_schedule_retry.send(namespace, job=job, err=err) log_args = ( job.retries, job.max_retries + 1, job, duration, human_duration( (job.at - datetime.now(tz=timezone.utc)).total_seconds() ) ) if isinstance(err, RetryException): logger.info('Retry requested during execution %d/%d of %s ' 'after %s, retry in %s', *log_args) else: logger.warning('Error during execution %d/%d of %s after %s, ' 'retry in %s', *log_args) return job.status = JobStatus.FAILED signals.job_failed.send(namespace, job=job, err=err) logger.error( 'Error during execution %d/%d of %s after %s', job.max_retries + 1, job.max_retries + 1, job, duration, exc_info=err )
Advance the status of a job depending on its execution. This function is called after a job has been executed. It calculates its next status and calls the appropriate signals.
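The exponential_backoff helper is not shown in this snippet; a plausible sketch (full jitter, capped) that matches how it is called above:

import random
from datetime import timedelta

def exponential_backoff(attempt: int, cap: float = 1200.0) -> timedelta:
    # full jitter: pick uniformly between 0 and min(cap, 2**attempt) seconds
    delay = min(cap, 2.0 ** attempt)
    return timedelta(seconds=random.uniform(0.0, delay))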
def insert(self):
    """persist the .fields"""
    self.default_val = 0
    return self.interface.insert(self.schema, self.fields)
persist the .fields
def _get_leftMargin(self): """ This must return an int or float. If the glyph has no outlines, this must return `None`. Subclasses may override this method. """ bounds = self.bounds if bounds is None: return None xMin, yMin, xMax, yMax = bounds return xMin
This must return an int or float. If the glyph has no outlines, this must return `None`. Subclasses may override this method.
def cbday_roll(self): """ Define default roll function to be called in apply method. """ cbday = CustomBusinessDay(n=self.n, normalize=False, **self.kwds) if self._prefix.endswith('S'): # MonthBegin roll_func = cbday.rollforward else: # MonthEnd roll_func = cbday.rollback return roll_func
Define default roll function to be called in apply method.
def colored_level_name(self, levelname): """ Colors the logging level in the logging record """ if self.colors_disabled: return self.plain_levelname_format.format(levelname) else: return self.colored_levelname_format.format(self.color_map[levelname], levelname)
Colors the logging level in the logging record
def create_port(self, context, network_id, port_id, **kwargs): """Create a port. :param context: neutron api request context. :param network_id: neutron network id. :param port_id: neutron port id. :param kwargs: required keys - device_id: neutron port device_id (instance_id) instance_node_id: nova hypervisor host id mac_address: neutron port mac address base_net_driver: the base network driver optional keys - addresses: list of allocated IPAddress models security_groups: list of associated security groups :raises IronicException: If the client is unable to create the downstream port for any reason, the exception will be logged and IronicException raised. """ LOG.info("create_port %s %s %s" % (context.tenant_id, network_id, port_id)) # sanity check if not kwargs.get('base_net_driver'): raise IronicException(msg='base_net_driver required.') base_net_driver = kwargs['base_net_driver'] if not kwargs.get('device_id'): raise IronicException(msg='device_id required.') device_id = kwargs['device_id'] if not kwargs.get('instance_node_id'): raise IronicException(msg='instance_node_id required.') instance_node_id = kwargs['instance_node_id'] if not kwargs.get('mac_address'): raise IronicException(msg='mac_address is required.') mac_address = str(netaddr.EUI(kwargs["mac_address"]["address"])) mac_address = mac_address.replace('-', ':') # TODO(morgabra): Change this when we enable security groups. if kwargs.get('security_groups'): msg = 'ironic driver does not support security group operations.' raise IronicException(msg=msg) # unroll the given address models into a fixed_ips list we can # pass downstream fixed_ips = [] addresses = kwargs.get('addresses') if not isinstance(addresses, list): addresses = [addresses] for address in addresses: fixed_ips.append(self._make_fixed_ip_dict(context, address)) body = { "id": port_id, "network_id": network_id, "device_id": device_id, "device_owner": kwargs.get('device_owner', ''), "tenant_id": context.tenant_id or "quark", "roles": context.roles, "mac_address": mac_address, "fixed_ips": fixed_ips, "switch:hardware_id": instance_node_id, "dynamic_network": not STRATEGY.is_provider_network(network_id) } net_info = self._get_base_network_info( context, network_id, base_net_driver) body.update(net_info) try: LOG.info("creating downstream port: %s" % (body)) port = self._create_port(context, body) LOG.info("created downstream port: %s" % (port)) return {"uuid": port['port']['id'], "vlan_id": port['port']['vlan_id']} except Exception as e: msg = "failed to create downstream port. Exception: %s" % (e) raise IronicException(msg=msg)
Create a port. :param context: neutron api request context. :param network_id: neutron network id. :param port_id: neutron port id. :param kwargs: required keys - device_id: neutron port device_id (instance_id) instance_node_id: nova hypervisor host id mac_address: neutron port mac address base_net_driver: the base network driver optional keys - addresses: list of allocated IPAddress models security_groups: list of associated security groups :raises IronicException: If the client is unable to create the downstream port for any reason, the exception will be logged and IronicException raised.
def series_to_yaml_safe(series, ordered=False):
    """
    Convert a pandas Series to a dict that will survive YAML serialization
    and re-conversion back to a Series.

    Parameters
    ----------
    series : pandas.Series
    ordered: bool, optional, default False
        If True, an OrderedDict is returned.

    Returns
    -------
    safe : dict or OrderedDict

    """
    index = series.index.to_native_types(quoting=True)
    values = series.values.tolist()

    if ordered:
        return OrderedDict((k, v) for k, v in zip(index, values))
    else:
        return {i: v for i, v in zip(index, values)}
Convert a pandas Series to a dict that will survive YAML serialization and re-conversion back to a Series. Parameters ---------- series : pandas.Series ordered: bool, optional, default False If True, an OrderedDict is returned. Returns ------- safe : dict or OrderedDict
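A minimal round-trip sketch; assumes PyYAML is available and that series_to_yaml_safe is importable from the surrounding module:

import pandas as pd
import yaml

s = pd.Series([1.5, 2.5], index=['a', 'b'])
safe = series_to_yaml_safe(s)
text = yaml.safe_dump(safe)           # a plain dict survives YAML cleanly
restored = pd.Series(yaml.safe_load(text))
print(restored)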
def drawItem(self, item, painter, option):
    """
    Draws the given item as a bar graph.

    :param      item    | <XChartDatasetItem>
                painter | <QPainter>
                option  | <QStyleOptionGraphicsItem>
    """
    dataset = item.dataset()

    painter.save()
    painter.setRenderHint(painter.Antialiasing)

    pen = QPen(dataset.color())
    pen.setWidthF(0.75)
    painter.setPen(pen)

    for path in item.buildData('subpaths', []):
        gradient = QLinearGradient()

        clr = QColor(dataset.color())
        clr.setAlpha(220)

        gradient.setColorAt(0.0, clr.lighter(180))
        gradient.setColorAt(0.1, clr.lighter(160))
        gradient.setColorAt(0.25, clr.lighter(140))
        gradient.setColorAt(1.0, clr.lighter(125))

        if self.orientation() == Qt.Vertical:
            gradient.setStart(0, path.boundingRect().bottom())
            gradient.setFinalStop(0, path.boundingRect().top())
        else:
            gradient.setStart(path.boundingRect().left(), 0)
            gradient.setFinalStop(path.boundingRect().right(), 0)

        painter.setBrush(gradient)
        painter.drawPath(path)

    painter.restore()
Draws the input item as a bar graph.

:param      item    | <XChartDatasetItem>
            painter | <QPainter>
            option  | <QStyleOptionGraphicsItem>
def negative_gradient(self, y, y_pred, sample_weight=None, **kwargs):
    """Negative gradient of partial likelihood

    Parameters
    ----------
    y : tuple, len = 2
        First element is boolean event indicator and second element
        survival/censoring time.
    y_pred : np.ndarray, shape = (n,)
        The predictions.
    """
    pred_time = y['time'] - y_pred.ravel()
    mask = (pred_time > 0) | y['event']
    ret = numpy.zeros(y['event'].shape[0])
    ret[mask] = pred_time.compress(mask, axis=0)

    if sample_weight is not None:
        ret *= sample_weight

    return ret
Negative gradient of partial likelihood

Parameters
----------
y : tuple, len = 2
    First element is boolean event indicator and second element
    survival/censoring time.
y_pred : np.ndarray, shape = (n,)
    The predictions.
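A quick sketch of the expected input layout, assuming the method belongs to a loss object (called `loss` here for illustration). `y` is a structured array with a boolean 'event' field and a float 'time' field, matching how the method indexes it.

import numpy

y = numpy.empty(4, dtype=[('event', bool), ('time', float)])
y['event'] = [True, False, True, False]
y['time'] = [3.0, 5.0, 7.0, 11.0]
y_pred = numpy.array([2.0, 6.0, 6.5, 10.0])

grad = loss.negative_gradient(y, y_pred)
# pred_time = [1.0, -1.0, 0.5, 1.0]; censored entries with
# pred_time <= 0 are zeroed, so grad == [1.0, 0.0, 0.5, 1.0]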
def firmware_manifest_retrieve(self, manifest_id, **kwargs):  # noqa: E501
    """Get a manifest  # noqa: E501

    Retrieve a firmware manifest.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.firmware_manifest_retrieve(manifest_id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str manifest_id: The firmware manifest ID (required)
    :return: FirmwareManifest
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.firmware_manifest_retrieve_with_http_info(manifest_id, **kwargs)  # noqa: E501
    else:
        data = self.firmware_manifest_retrieve_with_http_info(manifest_id, **kwargs)  # noqa: E501
        return data
Get a manifest  # noqa: E501

Retrieve a firmware manifest.  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True

>>> thread = api.firmware_manifest_retrieve(manifest_id, asynchronous=True)
>>> result = thread.get()

:param asynchronous bool
:param str manifest_id: The firmware manifest ID (required)
:return: FirmwareManifest
         If the method is called asynchronously,
         returns the request thread.
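Both call styles from the docstring, spelled out side by side; `api` is an instance of the API class defining this method and `manifest_id` is a placeholder.

# synchronous (default): returns a FirmwareManifest directly
manifest = api.firmware_manifest_retrieve(manifest_id)

# asynchronous: returns a thread; block on .get() for the result
thread = api.firmware_manifest_retrieve(manifest_id, asynchronous=True)
manifest = thread.get()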
def validate(self, input_string):
    """
    Validate url

    :return: True if match / False otherwise
    """
    parsed_url = urlparse(url=input_string)
    return bool(parsed_url.scheme and parsed_url.netloc)
Validate url

:return: True if match / False otherwise
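The check accepts a string only when urlparse finds both a scheme and a network location, so inputs behave as follows (`validator` stands in for whatever class hosts the method).

validator.validate('https://example.com/path')  # True: scheme + netloc
validator.validate('example.com')               # False: no scheme
validator.validate('mailto:user@example.com')   # False: no netloc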
def approve(group_id, user_id):
    """Approve a user."""
    membership = Membership.query.get_or_404((user_id, group_id))
    group = membership.group

    if group.can_edit(current_user):
        try:
            membership.accept()
        except Exception as e:
            flash(str(e), 'error')
            return redirect(url_for('.requests',
                                    group_id=membership.group.id))

        flash(_('%(user)s accepted to %(name)s group.',
                user=membership.user.email,
                name=membership.group.name), 'success')
        return redirect(url_for('.requests', group_id=membership.group.id))

    flash(
        _(
            'You cannot approve memberships for the group %(group_name)s',
            group_name=group.name
        ),
        'error'
    )
    return redirect(url_for('.index'))
Approve a user.
def pattern_match(pattern, string):
    """
    :type pattern: str
    :type string: str
    :rtype: bool
    """
    def backtrack(pattern, string, dic):
        if len(pattern) == 0 and len(string) > 0:
            return False

        if len(pattern) == len(string) == 0:
            return True

        for end in range(1, len(string) - len(pattern) + 2):
            if pattern[0] not in dic and string[:end] not in dic.values():
                dic[pattern[0]] = string[:end]
                if backtrack(pattern[1:], string[end:], dic):
                    return True
                del dic[pattern[0]]
            elif pattern[0] in dic and dic[pattern[0]] == string[:end]:
                if backtrack(pattern[1:], string[end:], dic):
                    return True
        return False

    return backtrack(pattern, string, {})
:type pattern: str
:type string: str
:rtype: bool
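Classic inputs for this bijective word-pattern check:

pattern_match('abab', 'redblueredblue')   # True:  a -> 'red', b -> 'blue'
pattern_match('aaaa', 'asdasdasdasd')     # True:  a -> 'asd'
pattern_match('aabb', 'xyzabcxzyabc')     # False: no consistent mapping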
async def _get_packet_from_stream(self, stream, existing_data,
                                  got_first_packet=True,
                                  psml_structure=None):
    """A coroutine which returns a single packet if it can be read from
    the given StreamReader.

    :return a tuple of (packet, remaining_data). The packet will be None
        if there was not enough XML data to create a packet.
        remaining_data is the leftover data which was not enough to
        create a packet from.
    :raises EOFError if EOF was reached.
    """
    # yield each packet in existing_data
    if self.use_json:
        packet, existing_data = self._extract_packet_json_from_data(
            existing_data, got_first_packet=got_first_packet)
    else:
        packet, existing_data = self._extract_tag_from_data(existing_data)

    if packet:
        if self.use_json:
            packet = packet_from_json_packet(packet)
        else:
            packet = packet_from_xml_packet(packet,
                                            psml_structure=psml_structure)
        return packet, existing_data

    new_data = await stream.read(self.DEFAULT_BATCH_SIZE)
    existing_data += new_data

    if not new_data:
        # Reached EOF
        raise EOFError()
    return None, existing_data
A coroutine which returns a single packet if it can be read from the
given StreamReader.

:return a tuple of (packet, remaining_data). The packet will be None if
    there was not enough XML data to create a packet. remaining_data is
    the leftover data which was not enough to create a packet from.
:raises EOFError if EOF was reached.
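A sketch of the buffer-and-extract loop this coroutine is built for; `capture` is assumed to be the object defining the method, and calling the private coroutine directly is for illustration only.

async def read_packets(capture, stream):
    # Carry leftover bytes across calls until EOF is signalled.
    data = b''
    while True:
        try:
            packet, data = await capture._get_packet_from_stream(stream, data)
        except EOFError:
            break
        if packet is not None:
            yield packet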
def dev_parameters_vs_axis(dnaRef, dnaSubj, parameter, bp, axis='Z',
                           bp_range=True, windows=10, err_type='block',
                           tool='gmx analyze'):
    """To calculate deviation in the given parameters of a Subject DNA
    with respect to a Reference DNA along the given axis.

    .. note:: Deviation = Reference_DNA(parameter) - Subject_DNA(parameter)

    .. warning:: To calculate errors by using ``error = 'acf'`` or
        ``error = 'block'``, the GROMACS tool ``g_analyze`` or
        ``gmx analyze`` should be present in ``$PATH``.

    Parameters
    ----------
    dnaRef : :class:`DNA`
        Reference DNA
    dnaSubj : :class:`DNA`
        Subject DNA. Number of base-pairs in Reference and Subject DNA
        **should be** same.
    parameter : str
        Name of a base-pair or base-step or helical base-step parameter.
        For details about accepted keywords, see ``parameter`` in the
        method :meth:`DNA.get_parameters`.
    bp : 1D list or array
        base-pairs to analyze
        Example: ::

            bp = [6]                    # bp_range = False
            bp = [4,15]                 # bp_range = True
            bp = range(4,15)            # bp_range = False
            bp = np.arange(4,15)        # bp_range = False
            bp = [2,5,6,7,9,12,18]      # bp_range = False

    bp_range : bool
        ``Default=True``: As shown above, if ``True``, bp is taken as a
        range, otherwise as a list or numpy array.
    axis : str
        Axis to which the DNA axis is parallel. Keywords: ``X``, ``Y``
        and ``Z``.
    windows : int
        Number of bins along the axis
    err_type : str
        Method of error estimation. Currently accepted methods are:

            * ``error = 'std'``   : Standard Deviation
            * ``error = 'acf'``   : Standard error using autocorrelation
              time (requires: ``g_analyze`` or ``gmx analyze``)
            * ``error = 'block'`` : Standard error using block averaging
              method (requires: ``g_analyze`` or ``gmx analyze``)

    tool : str
        GROMACS tool ``g_analyze``, ``gmx analyze``, ``gmx_mpi analyze``,
        etc. It will be used to calculate the autocorrelation time or
        block averaging error. It should be present in ``$PATH``.

    Returns
    -------
    deviation : 1D array
        length of no. of windows; Deviation in the parameter for the two
        given DNAs
    deviation_error : 1D array
        length of no. of windows; Standard error in deviation for each
        window/bin
    axis : 1D array
        length of no. of windows; average position of window/bin along
        the given axis
    axis_error : 1D array
        length of no. of windows; Standard error in average position of
        window/bin along the given axis
    """
    RefParam, ref_bp_idx = dnaRef.get_parameters(parameter, bp, bp_range)
    RefAxis, dummy = dnaRef.get_parameters(
        'Helical {0}-axis'.format(axis), bp, bp_range)
    SubjParam, subj_bp_idx = dnaSubj.get_parameters(parameter, bp, bp_range)

    mean_axis = np.mean(RefAxis, axis=1)
    meanRefParam = np.mean(RefParam, axis=1)
    meanSubjParam = np.mean(SubjParam, axis=1)

    maxAxis = np.amax(mean_axis)
    minAxis = np.amin(mean_axis)
    axis_range = (maxAxis - minAxis) / windows

    Ref_param_error = get_error(dnaRef.time, RefParam, len(ref_bp_idx),
                                err_type=err_type, tool=tool)
    Ref_axis_error = get_error(dnaRef.time, RefAxis, len(ref_bp_idx),
                               err_type=err_type, tool=tool)
    subj_param_error = get_error(dnaSubj.time, SubjParam, len(subj_bp_idx),
                                 err_type=err_type, tool=tool)

    merged_ref_param = []
    merged_subj_Param = []
    merged_Ref_param_error = []
    merged_Ref_axis_error = []
    merged_subj_param_error = []
    final_axis = []

    for i in range(windows):
        start = minAxis + (i * axis_range)
        end = start + axis_range
        idx = []
        for j in range(len(mean_axis)):
            if (start <= mean_axis[j]) and (end > mean_axis[j]):
                idx.append(j)
        if len(idx) > 0:
            merged_ref_param.append(meanRefParam[idx])
            merged_subj_Param.append(meanSubjParam[idx])
            final_axis.append(start + (end - start) / 2)
            merged_Ref_param_error.append(Ref_param_error[idx])
            merged_Ref_axis_error.append(Ref_axis_error[idx])
            merged_subj_param_error.append(subj_param_error[idx])

    final_ref_param = []
    final_subj_param = []
    final_ref_param_error = []
    final_ref_axis_error = []
    final_subj_param_error = []
    for i in range(len(merged_ref_param)):
        final_ref_param.append(np.sum(merged_ref_param[i]))
        final_subj_param.append(np.sum(merged_subj_Param[i]))
        final_ref_axis_error.append(
            np.sqrt((merged_Ref_axis_error[i] ** 2).sum()))
        final_ref_param_error.append(
            np.sqrt((merged_Ref_param_error[i] ** 2).sum()))
        final_subj_param_error.append(
            np.sqrt((merged_subj_param_error[i] ** 2).sum()))

    deviation, error = get_deviation(
        final_ref_param, final_ref_param_error,
        final_subj_param, final_subj_param_error)

    return deviation, error, final_axis, final_ref_axis_error
To calculate deviation in the given parameters of a Subject DNA with
respect to a Reference DNA along the given axis.

.. note:: Deviation = Reference_DNA(parameter) - Subject_DNA(parameter)

.. warning:: To calculate errors by using ``error = 'acf'`` or
    ``error = 'block'``, the GROMACS tool ``g_analyze`` or
    ``gmx analyze`` should be present in ``$PATH``.

Parameters
----------
dnaRef : :class:`DNA`
    Reference DNA
dnaSubj : :class:`DNA`
    Subject DNA. Number of base-pairs in Reference and Subject DNA
    **should be** same.
parameter : str
    Name of a base-pair or base-step or helical base-step parameter.
    For details about accepted keywords, see ``parameter`` in the
    method :meth:`DNA.get_parameters`.
bp : 1D list or array
    base-pairs to analyze
    Example: ::

        bp = [6]                    # bp_range = False
        bp = [4,15]                 # bp_range = True
        bp = range(4,15)            # bp_range = False
        bp = np.arange(4,15)        # bp_range = False
        bp = [2,5,6,7,9,12,18]      # bp_range = False

bp_range : bool
    ``Default=True``: As shown above, if ``True``, bp is taken as a
    range, otherwise as a list or numpy array.
axis : str
    Axis to which the DNA axis is parallel. Keywords: ``X``, ``Y`` and
    ``Z``.
windows : int
    Number of bins along the axis
err_type : str
    Method of error estimation. Currently accepted methods are:

        * ``error = 'std'``   : Standard Deviation
        * ``error = 'acf'``   : Standard error using autocorrelation
          time (requires: ``g_analyze`` or ``gmx analyze``)
        * ``error = 'block'`` : Standard error using block averaging
          method (requires: ``g_analyze`` or ``gmx analyze``)

tool : str
    GROMACS tool ``g_analyze``, ``gmx analyze``, ``gmx_mpi analyze``,
    etc. It will be used to calculate the autocorrelation time or block
    averaging error. It should be present in ``$PATH``.

Returns
-------
deviation : 1D array
    length of no. of windows; Deviation in the parameter for the two
    given DNAs
deviation_error : 1D array
    length of no. of windows; Standard error in deviation for each
    window/bin
axis : 1D array
    length of no. of windows; average position of window/bin along the
    given axis
axis_error : 1D array
    length of no. of windows; Standard error in average position of
    window/bin along the given axis
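A call sketch under the assumption that `dnaRef` and `dnaSubj` are already-built `DNA` objects with matching base-pair counts, and that 'twist' is among the parameter keywords accepted by `DNA.get_parameters`.

# Deviation in twist between base pairs 4-20, binned into 8 windows
# along the Z axis, with block-averaging errors via 'gmx analyze'.
dev, dev_err, zpos, zpos_err = dev_parameters_vs_axis(
    dnaRef, dnaSubj, 'twist', bp=[4, 20], bp_range=True,
    axis='Z', windows=8, err_type='block', tool='gmx analyze')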
def apply(self, func, axis=0, subset=None, **kwargs):
    """
    Apply a function column-wise, row-wise, or table-wise,
    updating the HTML representation with the result.

    Parameters
    ----------
    func : function
        ``func`` should take a Series or DataFrame (depending
        on ``axis``), and return an object with the same shape.
        Must return a DataFrame with identical index and
        column labels when ``axis=None``
    axis : {0 or 'index', 1 or 'columns', None}, default 0
        apply to each column (``axis=0`` or ``'index'``), to each row
        (``axis=1`` or ``'columns'``), or to the entire DataFrame at
        once with ``axis=None``.
    subset : IndexSlice
        a valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice
    kwargs : dict
        pass along to ``func``

    Returns
    -------
    self : Styler

    Notes
    -----
    The output shape of ``func`` should match the input, i.e. if
    ``x`` is the input row, column, or table (depending on ``axis``),
    then ``func(x).shape == x.shape`` should be true.

    This is similar to ``DataFrame.apply``, except that ``axis=None``
    applies the function to the entire DataFrame at once,
    rather than column-wise or row-wise.

    Examples
    --------
    >>> def highlight_max(x):
    ...     return ['background-color: yellow' if v == x.max() else ''
    ...             for v in x]
    ...
    >>> df = pd.DataFrame(np.random.randn(5, 2))
    >>> df.style.apply(highlight_max)
    """
    self._todo.append((lambda instance: getattr(instance, '_apply'),
                       (func, axis, subset), kwargs))
    return self
Apply a function column-wise, row-wise, or table-wise,
updating the HTML representation with the result.

Parameters
----------
func : function
    ``func`` should take a Series or DataFrame (depending
    on ``axis``), and return an object with the same shape.
    Must return a DataFrame with identical index and
    column labels when ``axis=None``
axis : {0 or 'index', 1 or 'columns', None}, default 0
    apply to each column (``axis=0`` or ``'index'``), to each row
    (``axis=1`` or ``'columns'``), or to the entire DataFrame at once
    with ``axis=None``.
subset : IndexSlice
    a valid indexer to limit ``data`` to *before* applying the
    function. Consider using a pandas.IndexSlice
kwargs : dict
    pass along to ``func``

Returns
-------
self : Styler

Notes
-----
The output shape of ``func`` should match the input, i.e. if ``x`` is
the input row, column, or table (depending on ``axis``), then
``func(x).shape == x.shape`` should be true.

This is similar to ``DataFrame.apply``, except that ``axis=None``
applies the function to the entire DataFrame at once, rather than
column-wise or row-wise.

Examples
--------
>>> def highlight_max(x):
...     return ['background-color: yellow' if v == x.max() else ''
...             for v in x]
...
>>> df = pd.DataFrame(np.random.randn(5, 2))
>>> df.style.apply(highlight_max)
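A complementary table-wise example: with ``axis=None`` the function receives the whole DataFrame and must hand back a DataFrame with identical labels.

import numpy as np
import pandas as pd

def highlight_table_max(d):
    # style the single largest cell in the whole table
    is_max = d == d.max().max()
    return pd.DataFrame(np.where(is_max, 'background-color: yellow', ''),
                        index=d.index, columns=d.columns)

df = pd.DataFrame(np.random.randn(5, 2))
df.style.apply(highlight_table_max, axis=None)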
def debit(self, amount, credit_account, description, debit_memo="",
          credit_memo="", datetime=None):
    """Post a debit of 'amount' and a credit of -amount against this
    account and credit_account respectively.

    Note: 'amount' must be non-negative.
    """
    assert amount >= 0
    return self.post(amount, credit_account, description,
                     self_memo=debit_memo, other_memo=credit_memo,
                     datetime=datetime)
Post a debit of 'amount' and a credit of -amount against this account
and credit_account respectively.

Note: 'amount' must be non-negative.
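A double-entry sketch with hypothetical Account instances; `cash` and `revenue` are illustrative names, and `post` records the matching credit leg on the other account.

# Hypothetical accounts -- not defined in the source.
cash.debit(100, revenue, 'invoice #42 paid',
           debit_memo='cash received',
           credit_memo='sales income')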
def masked_rec_array_to_mgr(data, index, columns, dtype, copy):
    """
    Extract from a masked rec array and create the manager.
    """
    # essentially process a record array, then fill it
    fill_value = data.fill_value
    fdata = ma.getdata(data)
    if index is None:
        index = get_names_from_index(fdata)
        if index is None:
            index = ibase.default_index(len(data))
    index = ensure_index(index)

    if columns is not None:
        columns = ensure_index(columns)
    arrays, arr_columns = to_arrays(fdata, columns)

    # fill if needed
    new_arrays = []
    for fv, arr, col in zip(fill_value, arrays, arr_columns):
        mask = ma.getmaskarray(data[col])
        if mask.any():
            arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
            arr[mask] = fv
        new_arrays.append(arr)

    # create the manager
    arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns)
    if columns is None:
        columns = arr_columns

    mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype)

    if copy:
        mgr = mgr.copy()
    return mgr
Extract from a masked rec array and create the manager.
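This helper sits behind DataFrame construction from masked record arrays. A minimal sketch of the public path that exercises it, assuming a pandas version where masked recarrays route through this function; masked cells come back filled, upcast to NaN for floats.

import numpy.ma as ma
from numpy.ma import mrecords
import pandas as pd

b = ma.array([0.1, 0.2, 0.3], mask=[False, True, False])
mrec = mrecords.fromarrays([[1, 2, 3], b], names=['a', 'b'])

df = pd.DataFrame(mrec)   # masked b[1] becomes NaN in the frame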
def find_card_bundles(provider: Provider, deck: Deck) -> Optional[Iterator]:
    '''Each blockchain transaction can contain multiple cards, wrapped
    in bundles. This method finds and returns those bundles.'''

    if isinstance(provider, RpcNode):
        if deck.id is None:
            raise Exception("deck.id required to listtransactions")

        p2th_account = provider.getaccount(deck.p2th_address)
        batch_data = [('getrawtransaction', [i["txid"], 1]) for i in
                      provider.listtransactions(p2th_account)]
        result = provider.batch(batch_data)
        if result is not None:
            raw_txns = [i['result'] for i in result if result]
        else:
            raise EmptyP2THDirectory({'error': 'No cards found on this deck.'})
    else:
        if deck.p2th_address is None:
            raise Exception("deck.p2th_address required to listtransactions")

        try:
            raw_txns = (provider.getrawtransaction(i, 1) for i in
                        provider.listtransactions(deck.p2th_address))
        except TypeError:
            raise EmptyP2THDirectory({'error': 'No cards found on this deck.'})

    return (card_bundler(provider, deck, i) for i in raw_txns)
Each blockchain transaction can contain multiple cards, wrapped in bundles. This method finds and returns those bundles.
def Synchronized(f):
    """Synchronization decorator."""

    @functools.wraps(f)
    def NewFunction(self, *args, **kw):
        with self.lock:
            return f(self, *args, **kw)
    return NewFunction
Synchronization decorator.
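The decorator assumes the instance exposes a `self.lock`; a minimal usage sketch:

import threading

class Counter:
    def __init__(self):
        self.lock = threading.Lock()
        self.value = 0

    @Synchronized
    def increment(self):
        # runs with self.lock held, so concurrent calls serialize
        self.value += 1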
def compare_dicts(old=None, new=None):
    '''
    Compare before and after results from various salt functions, returning a
    dict describing the changes that were made.
    '''
    ret = {}
    # Normalize None to empty dicts; the membership checks below would
    # otherwise raise a TypeError when either argument is None.
    old = old or {}
    new = new or {}
    for key in set(new).union(old):
        if key not in old:
            # New key
            ret[key] = {'old': '',
                        'new': new[key]}
        elif key not in new:
            # Key removed
            ret[key] = {'new': '',
                        'old': old[key]}
        elif new[key] != old[key]:
            # Key modified
            ret[key] = {'old': old[key],
                        'new': new[key]}
    return ret
Compare before and after results from various salt functions, returning a dict describing the changes that were made.
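Example output; unchanged keys are omitted, and added or removed keys get an empty string for the missing side.

old = {'enabled': False, 'port': 22}
new = {'enabled': True, 'port': 22, 'timeout': 30}
compare_dicts(old, new)
# {'enabled': {'old': False, 'new': True},
#  'timeout': {'old': '', 'new': 30}}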
def imbtree(ntips, treeheight=1.0):
    """ Return an imbalanced (comb-like) tree topology. """
    rtree = toytree.tree()
    rtree.treenode.add_child(name="0")
    rtree.treenode.add_child(name="1")

    for i in range(2, ntips):
        # empty node
        cherry = toytree.tree()
        # add new child
        cherry.treenode.add_child(name=str(i))
        # add old tree
        cherry.treenode.add_child(rtree.treenode)
        # update rtree
        rtree = cherry

    # get toytree from newick
    tre = toytree.tree(rtree.write(tree_format=9))
    tre = tre.mod.make_ultrametric()
    self = tre.mod.node_scale_root_height(treeheight)
    self._coords.update()
    return self
Return an imbalanced (comb-like) tree topology.
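Usage sketch, assuming toytree is installed:

tre = imbtree(ntips=5, treeheight=2.0)
print(tre.ntips)   # 5
tre.draw()         # ladder-like (comb) topology, root height 2.0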