Columns: code (string, lengths 75 to 104k) and docstring (string, lengths 1 to 46.9k)
def create_dimension(ncfile, name, length) -> None: """Add a new dimension with the given name and length to the given NetCDF file. Essentially, |create_dimension| just calls the equally named method of the NetCDF library, but adds information to possible error messages: >>> from hydpy import TestIO >>> from hydpy.core.netcdftools import netcdf4 >>> with TestIO(): ... ncfile = netcdf4.Dataset('test.nc', 'w') >>> from hydpy.core.netcdftools import create_dimension >>> create_dimension(ncfile, 'dim1', 5) >>> dim = ncfile.dimensions['dim1'] >>> dim.size if hasattr(dim, 'size') else dim 5 >>> try: ... create_dimension(ncfile, 'dim1', 5) ... except BaseException as exc: ... print(exc) # doctest: +ELLIPSIS While trying to add dimension `dim1` with length `5` \ to the NetCDF file `test.nc`, the following error occurred: ... >>> ncfile.close() """ try: ncfile.createDimension(name, length) except BaseException: objecttools.augment_excmessage( 'While trying to add dimension `%s` with length `%d` ' 'to the NetCDF file `%s`' % (name, length, get_filepath(ncfile)))
Add a new dimension with the given name and length to the given NetCDF file. Essentially, |create_dimension| just calls the equally named method of the NetCDF library, but adds information to possible error messages: >>> from hydpy import TestIO >>> from hydpy.core.netcdftools import netcdf4 >>> with TestIO(): ... ncfile = netcdf4.Dataset('test.nc', 'w') >>> from hydpy.core.netcdftools import create_dimension >>> create_dimension(ncfile, 'dim1', 5) >>> dim = ncfile.dimensions['dim1'] >>> dim.size if hasattr(dim, 'size') else dim 5 >>> try: ... create_dimension(ncfile, 'dim1', 5) ... except BaseException as exc: ... print(exc) # doctest: +ELLIPSIS While trying to add dimension `dim1` with length `5` \ to the NetCDF file `test.nc`, the following error occurred: ... >>> ncfile.close()
def showtraceback(self, *args, **kwargs): """Display the exception that just occurred.""" # Override for avoid using sys.excepthook PY-12600 try: type, value, tb = sys.exc_info() sys.last_type = type sys.last_value = value sys.last_traceback = tb tblist = traceback.extract_tb(tb) del tblist[:1] lines = traceback.format_list(tblist) if lines: lines.insert(0, "Traceback (most recent call last):\n") lines.extend(traceback.format_exception_only(type, value)) finally: tblist = tb = None sys.stderr.write(''.join(lines))
Display the exception that just occurred.
def _scalar_field_to_json(field, row_value): """Maps a field and value to a JSON-safe value. Args: field ( \ :class:`~google.cloud.bigquery.schema.SchemaField`, \ ): The SchemaField to use for type conversion and field name. row_value (any): Value to be converted, based on the field's type. Returns: any: A JSON-serializable object. """ converter = _SCALAR_VALUE_TO_JSON_ROW.get(field.field_type) if converter is None: # STRING doesn't need converting return row_value return converter(row_value)
Maps a field and value to a JSON-safe value. Args: field (:class:`~google.cloud.bigquery.schema.SchemaField`): The SchemaField to use for type conversion and field name. row_value (any): Value to be converted, based on the field's type. Returns: any: A JSON-serializable object.
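As an aside, the converter-lookup pattern above is easy to reproduce standalone. The mapping below is a hypothetical stand-in for the library-internal _SCALAR_VALUE_TO_JSON_ROW table, not the real one; field types without an entry (such as STRING) pass through unchanged.

import base64
import datetime

# Hypothetical stand-in for _SCALAR_VALUE_TO_JSON_ROW.
SCALAR_CONVERTERS = {
    "TIMESTAMP": lambda v: v.isoformat(),
    "BYTES": lambda v: base64.b64encode(v).decode("ascii"),
}

def scalar_to_json(field_type, value):
    # Look up a converter by field type; fall back to returning the value as-is.
    converter = SCALAR_CONVERTERS.get(field_type)
    return value if converter is None else converter(value)

print(scalar_to_json("STRING", "abc"))                             # abc
print(scalar_to_json("TIMESTAMP", datetime.datetime(2020, 1, 1)))  # 2020-01-01T00:00:00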
def requires(self): """ This task's dependencies: * :py:class:`~.AggregateArtists` or * :py:class:`~.AggregateArtistsSpark` if :py:attr:`~/.Top10Artists.use_spark` is set. :return: object (:py:class:`luigi.task.Task`) """ if self.use_spark: return AggregateArtistsSpark(self.date_interval) else: return AggregateArtists(self.date_interval)
This task's dependencies: * :py:class:`~.AggregateArtists` or * :py:class:`~.AggregateArtistsSpark` if :py:attr:`~.Top10Artists.use_spark` is set. :return: object (:py:class:`luigi.task.Task`)
def song(self, song_id): """Get information about a song. Parameters: song_id (str): A song ID. Returns: dict: Song information. """ if song_id.startswith('T'): song_info = self._call( mc_calls.FetchTrack, song_id ).body else: song_info = next( ( song for song in self.songs() if song['id'] == song_id ), None ) return song_info
Get information about a song. Parameters: song_id (str): A song ID. Returns: dict: Song information.
def calc_and_plot_sample_orient_check(self): """ If sample orientation is on plots the wrong arrow, wrong compass, and rotated sample error directions for the current specimen interpretation on the high level mean plot so that you can check sample orientation good/bad. """ fit = self.current_fit if fit == None: return pars = fit.get('specimen') if 'specimen_dec' not in list(pars.keys()) or 'specimen_inc' not in list(pars.keys()): fit.put(self.s, 'specimen', self.get_PCA_parameters( self.s, fit, fit.tmin, fit.tmax, 'specimen', fit.PCA_type)) pars = fit.get('specimen') if not pars: self.user_warning( "could not calculate fit %s for specimen %s in specimen coordinate system while checking sample orientation please check data" % (fit.name, self.s)) return dec, inc = pars['specimen_dec'], pars['specimen_inc'] sample = self.Data_hierarchy['sample_of_specimen'][self.s] if sample not in list(self.Data_info["er_samples"].keys()) or "sample_azimuth" not in list(self.Data_info["er_samples"][sample].keys()) or "sample_dip" not in list(self.Data_info["er_samples"][sample].keys()): self.user_warning( "Could not display sample orientation checks because sample azimuth or sample dip is missing from er_samples table for sample %s" % sample) self.check_orient_on = False # stop trying because this raises a ton of warnings return try: azimuth = float(self.Data_info["er_samples"][sample]['sample_azimuth']) dip = float(self.Data_info["er_samples"][sample]['sample_dip']) except TypeError: self.user_warning( "Could not display sample orientation checks because azimuth or dip is missing (or invalid) for sample %s" % sample) self.check_orient_on = False # stop trying because this raises a ton of warnings return # first test wrong direction of drill arrows (flip drill direction in opposite direction and re-calculate d,i) d, i = pmag.dogeo(dec, inc, azimuth-180., -dip) XY = pmag.dimap(d, i) if i > 0: FC = fit.color SIZE = 15*self.GUI_RESOLUTION else: FC = 'white' SIZE = 15*self.GUI_RESOLUTION self.high_level_eqarea.scatter([XY[0]], [ XY[1]], marker='^', edgecolor=fit.color, facecolor=FC, s=SIZE, lw=1, clip_on=False) if self.ie_open: self.ie.scatter([XY[0]], [XY[1]], marker='^', edgecolor=fit.color, facecolor=FC, s=SIZE, lw=1, clip_on=False) # Then test wrong end of compass (take az-180.) d, i = pmag.dogeo(dec, inc, azimuth-180., dip) XY = pmag.dimap(d, i) if i > 0: FC = fit.color SIZE = 15*self.GUI_RESOLUTION else: FC = 'white' SIZE = 15*self.GUI_RESOLUTION self.high_level_eqarea.scatter([XY[0]], [ XY[1]], marker='v', edgecolor=fit.color, facecolor=FC, s=SIZE, lw=1, clip_on=False) if self.ie_open: self.ie.scatter([XY[0]], [XY[1]], marker='v', edgecolor=fit.color, facecolor=FC, s=SIZE, lw=1, clip_on=False) # did the sample spin in the hole? # now spin around specimen's z X_up, Y_up, X_d, Y_d = [], [], [], [] for incr in range(0, 360, 5): d, i = pmag.dogeo(dec+incr, inc, azimuth, dip) XY = pmag.dimap(d, i) if i >= 0: X_d.append(XY[0]) Y_d.append(XY[1]) else: X_up.append(XY[0]) Y_up.append(XY[1]) self.high_level_eqarea.scatter( X_d, Y_d, marker='.', color=fit.color, alpha=.5, s=SIZE/2, lw=1, clip_on=False) self.high_level_eqarea.scatter( X_up, Y_up, marker='.', color=fit.color, s=SIZE/2, lw=1, clip_on=False) if self.ie_open: self.ie.scatter(X_d, Y_d, marker='.', color=fit.color, alpha=.5, s=SIZE/2, lw=1, clip_on=False) self.ie.scatter(X_up, Y_up, marker='.', color=fit.color, s=SIZE/2, lw=1, clip_on=False)
If sample orientation checking is on, plots the wrong-arrow, wrong-compass, and rotated-sample error directions for the current specimen interpretation on the high-level mean plot so that you can check whether the sample orientation is good or bad.
def _get_content(data, which_content): """ get the content that could be hidden in the middle of "content" or "summary detail" from the data of the provider """ content = '' if data.get(which_content): if isinstance(data.get(which_content), feedparser.FeedParserDict): content = data.get(which_content)['value'] elif not isinstance(data.get(which_content), str): if 'value' in data.get(which_content)[0]: content = data.get(which_content)[0].value else: content = data.get(which_content) return content
Get the content that may be nested inside "content" or "summary detail" in the data returned by the provider.
def maybe_download(url, filename): """Download the data from Yann's website, unless it's already here.""" if not os.path.exists(WORK_DIRECTORY): os.mkdir(WORK_DIRECTORY) filepath = os.path.join(WORK_DIRECTORY, filename) if not os.path.exists(filepath): filepath, _ = request.urlretrieve(url + filename, filepath) statinfo = os.stat(filepath) print('Successfully downloaded', filename, statinfo.st_size, 'bytes.') return filepath
Download the data from Yann's website, unless it's already here.
def drp_load_data(package, data, confclass=None): """Load the DRPS from data.""" drpdict = yaml.safe_load(data) ins = load_instrument(package, drpdict, confclass=confclass) if ins.version == 'undefined': pkg = importlib.import_module(package) ins.version = getattr(pkg, '__version__', 'undefined') return ins
Load the DRPS from data.
def get_plot(self, normalize_rxn_coordinate=True, label_barrier=True): """ Returns the NEB plot. Uses Henkelman's approach of spline fitting each section of the reaction path based on tangent force and energies. Args: normalize_rxn_coordinate (bool): Whether to normalize the reaction coordinate to between 0 and 1. Defaults to True. label_barrier (bool): Whether to label the maximum barrier. Returns: matplotlib.pyplot object. """ plt = pretty_plot(12, 8) scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1] x = np.arange(0, np.max(self.r), 0.01) y = self.spline(x) * 1000 relative_energies = self.energies - self.energies[0] plt.plot(self.r * scale, relative_energies * 1000, 'ro', x * scale, y, 'k-', linewidth=2, markersize=10) plt.xlabel("Reaction coordinate") plt.ylabel("Energy (meV)") plt.ylim((np.min(y) - 10, np.max(y) * 1.02 + 20)) if label_barrier: data = zip(x * scale, y) barrier = max(data, key=lambda d: d[1]) plt.plot([0, barrier[0]], [barrier[1], barrier[1]], 'k--') plt.annotate('%.0f meV' % (np.max(y) - np.min(y)), xy=(barrier[0] / 2, barrier[1] * 1.02), xytext=(barrier[0] / 2, barrier[1] * 1.02), horizontalalignment='center') plt.tight_layout() return plt
Returns the NEB plot. Uses Henkelman's approach of spline fitting each section of the reaction path based on tangent force and energies. Args: normalize_rxn_coordinate (bool): Whether to normalize the reaction coordinate to between 0 and 1. Defaults to True. label_barrier (bool): Whether to label the maximum barrier. Returns: matplotlib.pyplot object.
def read_string_from_file(path, encoding="utf8"): """ Read entire contents of file into a string. """ with codecs.open(path, "rb", encoding=encoding) as f: value = f.read() return value
Read entire contents of file into a string.
def add_role(self, role, term, start_date=None, end_date=None, **kwargs): """ Examples: leg.add_role('member', term='2009', chamber='upper', party='Republican', district='10th') """ self['roles'].append(dict(role=role, term=term, start_date=start_date, end_date=end_date, **kwargs))
Examples: leg.add_role('member', term='2009', chamber='upper', party='Republican', district='10th')
def press(self): ''' press key via name or key code. Supported key name includes: home, back, left, right, up, down, center, menu, search, enter, delete(or del), recent(recent apps), volume_up, volume_down, volume_mute, camera, power. Usage: d.press.back() # press back key d.press.menu() # press home key d.press(89) # press keycode ''' @param_to_property( key=["home", "back", "left", "right", "up", "down", "center", "menu", "search", "enter", "delete", "del", "recent", "volume_up", "volume_down", "volume_mute", "camera", "power"] ) def _press(key, meta=None): if isinstance(key, int): return self.server.jsonrpc.pressKeyCode(key, meta) if meta else self.server.jsonrpc.pressKeyCode(key) else: return self.server.jsonrpc.pressKey(str(key)) return _press
Press a key via name or key code. Supported key names include: home, back, left, right, up, down, center, menu, search, enter, delete(or del), recent(recent apps), volume_up, volume_down, volume_mute, camera, power. Usage: d.press.back() # press back key d.press.menu() # press menu key d.press(89) # press keycode 89
def _copy_old_features(new_eopatch, old_eopatch, copy_features): """ Copy features from old EOPatch :param new_eopatch: New EOPatch container where the old features will be copied to :type new_eopatch: EOPatch :param old_eopatch: Old EOPatch container where the old features are located :type old_eopatch: EOPatch :param copy_features: List of tuples of type (FeatureType, str) or (FeatureType, str, str) that are copied over into the new EOPatch. The first string is the feature name, and the second one (optional) is a new name to be used for the feature :type copy_features: list((FeatureType, str) or (FeatureType, str, str)) """ if copy_features: existing_features = set(new_eopatch.get_feature_list()) for copy_feature_type, copy_feature_name, copy_new_feature_name in copy_features: new_feature = copy_feature_type, copy_new_feature_name if new_feature in existing_features: raise ValueError('Feature {} of {} already exists in the new EOPatch! ' 'Use a different name!'.format(copy_new_feature_name, copy_feature_type)) else: existing_features.add(new_feature) new_eopatch[copy_feature_type][copy_new_feature_name] = \ old_eopatch[copy_feature_type][copy_feature_name] return new_eopatch
Copy features from old EOPatch :param new_eopatch: New EOPatch container where the old features will be copied to :type new_eopatch: EOPatch :param old_eopatch: Old EOPatch container where the old features are located :type old_eopatch: EOPatch :param copy_features: List of tuples of type (FeatureType, str) or (FeatureType, str, str) that are copied over into the new EOPatch. The first string is the feature name, and the second one (optional) is a new name to be used for the feature :type copy_features: list((FeatureType, str) or (FeatureType, str, str))
def OSCBlob(next): """Convert a string into an OSC Blob, returning a (typetag, data) tuple.""" if type(next) == type(""): length = len(next) padded = math.ceil((len(next)) / 4.0) * 4 binary = struct.pack(">i%ds" % (padded), length, next) tag = 'b' else: tag = '' binary = '' return (tag, binary)
Convert a string into an OSC Blob, returning a (typetag, data) tuple.
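A worked example of the padding arithmetic, sketched with bytes for Python 3 (the original targets Python 2 strings); an OSC blob is a 4-byte big-endian length followed by the payload null-padded to a multiple of 4 bytes.

import math
import struct

def osc_blob(data):
    # Pad the payload length up to the next multiple of 4; struct's "Ns" format
    # null-pads the bytes to exactly N.
    padded = math.ceil(len(data) / 4.0) * 4
    return 'b', struct.pack(">i%ds" % padded, len(data), data)

tag, binary = osc_blob(b"abc")
print(tag, len(binary))  # b 8  -> 4 length bytes + 3 data bytes + 1 pad byte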
def _create_deployment_object(self, job_name, job_image, deployment_name, port=80, replicas=1, cmd_string=None, engine_json_file='~/.ipython/profile_default/security/ipcontroller-engine.json', engine_dir='.'): """ Create a kubernetes deployment for the job. Args: - job_name (string) : Name of the job and deployment - job_image (string) : Docker image to launch KWargs: - port (integer) : Container port - replicas : Number of replica containers to maintain Returns: - True: The deployment object to launch """ # sorry, quick hack that doesn't pass this stuff through to test it works. # TODO it also doesn't only add what is set :( security_context = None if 'security' in self.config['execution']: security_context = client.V1SecurityContext(run_as_group=self.group_id, run_as_user=self.user_id, run_as_non_root=self.run_as_non_root) # self.user_id = None # self.group_id = None # self.run_as_non_root = None # Create the enviornment variables and command to initiate IPP environment_vars = client.V1EnvVar(name="TEST", value="SOME DATA") launch_args = ["-c", "{0}; /app/deploy.sh;".format(cmd_string)] print(launch_args) # Configureate Pod template container container = None if security_context: container = client.V1Container( name=job_name, image=job_image, ports=[client.V1ContainerPort(container_port=port)], command=['/bin/bash'], args=launch_args, env=[environment_vars], security_context=security_context) else: container = client.V1Container( name=job_name, image=job_image, ports=[client.V1ContainerPort(container_port=port)], command=['/bin/bash'], args=launch_args, env=[environment_vars]) # Create a secret to enable pulling images from secure repositories secret = None if self.secret: secret = client.V1LocalObjectReference(name=self.secret) # Create and configurate a spec section template = client.V1PodTemplateSpec( metadata=client.V1ObjectMeta(labels={"app": job_name}), spec=client.V1PodSpec(containers=[container], image_pull_secrets=[secret])) # Create the specification of deployment spec = client.ExtensionsV1beta1DeploymentSpec(replicas=replicas, template=template) # Instantiate the deployment object deployment = client.ExtensionsV1beta1Deployment( api_version="extensions/v1beta1", kind="Deployment", metadata=client.V1ObjectMeta(name=deployment_name), spec=spec) return deployment
Create a kubernetes deployment for the job. Args: - job_name (string) : Name of the job and deployment - job_image (string) : Docker image to launch KWargs: - port (integer) : Container port - replicas : Number of replica containers to maintain Returns: - True: The deployment object to launch
def analyze_dir(stats, parent_dir, rel_filepaths, cover_filename, *, ignore_existing=False): """ Analyze a directory (non recursively) to get its album metadata if it is one. """ no_metadata = None, None, None metadata = no_metadata audio_filepaths = [] for rel_filepath in rel_filepaths: stats["files"] += 1 try: ext = os.path.splitext(rel_filepath)[1][1:].lower() except IndexError: continue if ext in AUDIO_EXTENSIONS: audio_filepaths.append(os.path.join(parent_dir, rel_filepath)) if audio_filepaths: stats["albums"] += 1 if (cover_filename != EMBEDDED_ALBUM_ART_SYMBOL): missing = (not os.path.isfile(os.path.join(parent_dir, cover_filename))) or ignore_existing if missing: metadata = get_metadata(audio_filepaths) else: metadata = get_metadata(audio_filepaths) missing = (not metadata[2]) or ignore_existing if missing: stats["missing covers"] += 1 if not all(metadata[:-1]): # failed to get metadata for this album stats["errors"] += 1 logging.getLogger("sacad_r").error("Unable to read metadata for album directory '%s'" % (parent_dir)) else: metadata = no_metadata return metadata
Analyze a directory (non-recursively) to get its album metadata, if it is an album directory.
def null_concept(self): """Return the null concept of this subsystem. The null concept is a point in concept space identified with the unconstrained cause and effect repertoire of this subsystem. """ # Unconstrained cause repertoire. cause_repertoire = self.cause_repertoire((), ()) # Unconstrained effect repertoire. effect_repertoire = self.effect_repertoire((), ()) # Null cause. cause = MaximallyIrreducibleCause( _null_ria(Direction.CAUSE, (), (), cause_repertoire)) # Null effect. effect = MaximallyIrreducibleEffect( _null_ria(Direction.EFFECT, (), (), effect_repertoire)) # All together now... return Concept(mechanism=(), cause=cause, effect=effect, subsystem=self)
Return the null concept of this subsystem. The null concept is a point in concept space identified with the unconstrained cause and effect repertoire of this subsystem.
def find_usb_device_by_address(self, name): """Searches for a USB device with the given host address. :py:func:`IUSBDevice.address` in name of type str Address of the USB device (as assigned by the host) to search for. return device of type :class:`IHostUSBDevice` Found USB device object. raises :class:`VBoxErrorObjectNotFound` Given @c name does not correspond to any USB device. """ if not isinstance(name, basestring): raise TypeError("name can only be an instance of type basestring") device = self._call("findUSBDeviceByAddress", in_p=[name]) device = IHostUSBDevice(device) return device
Searches for a USB device with the given host address. :py:func:`IUSBDevice.address` in name of type str Address of the USB device (as assigned by the host) to search for. return device of type :class:`IHostUSBDevice` Found USB device object. raises :class:`VBoxErrorObjectNotFound` Given @c name does not correspond to any USB device.
def first_setup(self): """This is a guess of the meaning of this value.""" if ATTR_FIRST_SETUP not in self.raw: return None return datetime.utcfromtimestamp(self.raw[ATTR_FIRST_SETUP])
This is a guess of the meaning of this value.
def get_osdp(self, id_or_uri): """ Retrieves facts about Server Profiles and Server Profile Templates that are using Deployment Plan based on the ID or URI provided. Args: id_or_uri: ID or URI of the Deployment Plan. Returns: dict: Server Profiles and Server Profile Templates """ uri = self._client.build_subresource_uri(resource_id_or_uri=id_or_uri, subresource_path="osdp") return self._client.get(uri)
Retrieves facts about Server Profiles and Server Profile Templates that are using Deployment Plan based on the ID or URI provided. Args: id_or_uri: ID or URI of the Deployment Plan. Returns: dict: Server Profiles and Server Profile Templates
def set_prefix(self, elt, pyobj): '''use this method to set the prefix of the QName, method looks in DOM to find prefix or set new prefix. This method must be called before get_formatted_content. ''' if isinstance(pyobj, tuple): namespaceURI,localName = pyobj self.prefix = elt.getPrefix(namespaceURI)
Use this method to set the prefix of the QName; it looks in the DOM to find an existing prefix or sets a new one. This method must be called before get_formatted_content.
def get_index_text(self, modname, name_cls): """Return index entry text based on object type.""" if self.objtype in ('class', 'record'): if not modname: return _('%s (built-in %s)') % (name_cls[0], self.objtype) return _('%s (%s in %s)') % (name_cls[0], self.objtype, modname) else: return ''
Return index entry text based on object type.
def ability(cls, id_, name, function_type, ability_id, general_id=0): """Define a function represented as a game ability.""" assert function_type in ABILITY_FUNCTIONS return cls(id_, name, ability_id, general_id, function_type, FUNCTION_TYPES[function_type], None)
Define a function represented as a game ability.
def get_external_command_output(command: str) -> bytes: """ Takes a command-line command, executes it, and returns its ``stdout`` output. Args: command: command string Returns: output from the command as ``bytes`` """ args = shlex.split(command) ret = subprocess.check_output(args) # this needs Python 2.7 or higher return ret
Takes a command-line command, executes it, and returns its ``stdout`` output. Args: command: command string Returns: output from the command as ``bytes``
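A quick usage sketch of the helper above, assuming a POSIX-like system where echo is an executable on the PATH:

output = get_external_command_output('echo "hello world"')
print(output)  # b'hello world\n' -- shlex.split keeps the quoted argument intact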
def _create_ids(self, home_teams, away_teams): """ Creates IDs for both players/teams """ categories = pd.Categorical(np.append(home_teams,away_teams)) home_id, away_id = categories.codes[0:int(len(categories)/2)], categories.codes[int(len(categories)/2):len(categories)+1] return home_id, away_id
Creates IDs for both players/teams
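The trick worth noting is that home and away teams are concatenated before building the Categorical, so both columns share a single ID space. A standalone sketch with made-up team names:

import numpy as np
import pandas as pd

home_teams = np.array(["Arsenal", "Chelsea", "Leeds"])
away_teams = np.array(["Chelsea", "Leeds", "Arsenal"])

categories = pd.Categorical(np.append(home_teams, away_teams))
n = len(home_teams)
home_id, away_id = categories.codes[:n], categories.codes[n:]
print(home_id, away_id)  # [0 1 2] [1 2 0] -- the same team gets the same code in both columns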
def from_int(data): """ :params data: integer :returns: proquint made from input data :type data: int :rtype: string """ if not isinstance(data, int) and not isinstance(data, long): raise TypeError('Input must be integer') res = [] while data > 0 or not res: for j in range(5): if not j % 2: res += CONSONANTS[(data & 0xf)] data >>= 4 else: res += VOWELS[(data & 0x3)] data >>= 2 if data > 0: res += '-' res.reverse() return ''.join(res)
:param data: integer :returns: proquint made from input data :type data: int :rtype: string
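For reference, a self-contained sketch of how one 16-bit word is encoded, most-significant bits first. The alphabets below are the standard proquint tables and are an assumption about what CONSONANTS and VOWELS hold in this module.

CONSONANTS = 'bdfghjklmnprstvz'  # 16 symbols, 4 bits each
VOWELS = 'aiou'                  # 4 symbols, 2 bits each

def uint16_to_proquint(n):
    # One 16-bit word maps to consonant-vowel-consonant-vowel-consonant.
    out = []
    for shift, alphabet, mask in ((12, CONSONANTS, 0xF), (10, VOWELS, 0x3),
                                  (6, CONSONANTS, 0xF), (4, VOWELS, 0x3),
                                  (0, CONSONANTS, 0xF)):
        out.append(alphabet[(n >> shift) & mask])
    return ''.join(out)

print(uint16_to_proquint(0x7F00))  # lusab -- the first word of 127.0.0.1's proquint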
def align_file_position(f, size): """ Align the position in the file to the next block of specified size """ align = (size - 1) - (f.tell() % size) f.seek(align, 1)
Align the position in the file to the next block of specified size
def mstmap(args): """ %prog mstmap LMD50.snps.genotype.txt Convert LMDs to MSTMAP input. """ from jcvi.assembly.geneticmap import MSTMatrix p = OptionParser(mstmap.__doc__) p.add_option("--population_type", default="RIL6", help="Type of population, possible values are DH and RILd") p.add_option("--missing_threshold", default=.5, help="Missing threshold, .25 excludes any marker with >25% missing") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) lmd, = args fp = open(lmd) next(fp) # Header table = {"0": "-", "1": "A", "2": "B", "3": "X"} mh = ["locus_name"] + next(fp).split()[4:] genotypes = [] for row in fp: atoms = row.split() chr, pos, ref, alt = atoms[:4] locus_name = ".".join((chr, pos)) codes = [table[x] for x in atoms[4:]] genotypes.append([locus_name] + codes) mm = MSTMatrix(genotypes, mh, opts.population_type, opts.missing_threshold) mm.write(opts.outfile, header=True)
%prog mstmap LMD50.snps.genotype.txt Convert LMDs to MSTMAP input.
def from_dict(cls, d): """ Restores an object state from a dictionary, used in de-JSONification. :param d: the object dictionary :type d: dict :return: the object :rtype: object """ conf = {} for k in d["config"]: v = d["config"][k] if isinstance(v, dict): if u"type" in v: typestr = v[u"type"] else: typestr = v["type"] conf[str(k)] = classes.get_dict_handler(typestr)(v) else: conf[str(k)] = v return classes.get_class(d["class"])(name=d["name"], config=conf)
Restores an object state from a dictionary, used in de-JSONification. :param d: the object dictionary :type d: dict :return: the object :rtype: object
def convert_reshape(net, node, module, builder): """Converts a reshape layer from mxnet to coreml. This doesn't currently handle the deprecated parameters for the reshape layer. Parameters ---------- net: network An mxnet network object. node: layer Node to convert. module: module A module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) name = node['name'] param = _get_attr(node) target_shape = literal_eval(param['shape']) if target_shape == (0, -1): convert_flatten(net, node, module, builder) return if any(item <= 0 for item in target_shape): raise NotImplementedError('Special dimensional values less than or equal to 0 are not supported yet.' 'Feel free to file an issue here: https://github.com/dmlc/mxnet/issues.') if 'reverse' in node and node['reverse'] == 'True': raise NotImplementedError('"reverse" parameter is not supported by yet.' 'Feel free to file an issue here: https://github.com/dmlc/mxnet/issues.') mode = 0 # CHANNEL_FIRST builder.add_reshape(name, input_name, output_name, target_shape, mode)
Converts a reshape layer from mxnet to coreml. This doesn't currently handle the deprecated parameters for the reshape layer. Parameters ---------- net: network An mxnet network object. node: layer Node to convert. module: module A module for MXNet builder: NeuralNetworkBuilder A neural network builder object.
def get_user(self, username): """ Given the verified username, look up and return the corresponding user account if it exists, or raising ``ActivationError`` if it doesn't. """ User = get_user_model() try: user = User.objects.get(**{ User.USERNAME_FIELD: username, }) if user.is_active: raise ActivationError( self.ALREADY_ACTIVATED_MESSAGE, code='already_activated' ) return user except User.DoesNotExist: raise ActivationError( self.BAD_USERNAME_MESSAGE, code='bad_username' )
Given the verified username, look up and return the corresponding user account if it exists, or raise ``ActivationError`` if it doesn't.
def headerData(self, section, orientation, role=Qt.DisplayRole): """ Reimplements the :meth:`QAbstractItemModel.headerData` method. :param section: Section. :type section: int :param orientation: Orientation. ( Qt.Orientation ) :param role: Role. :type role: int :return: Header data. :rtype: QVariant """ if role == Qt.DisplayRole: if orientation == Qt.Horizontal: if section < len(self.__horizontal_headers): return self.__horizontal_headers.keys()[section] elif orientation == Qt.Vertical: if section < len(self.__vertical_headers): return self.__vertical_headers.keys()[section] return QVariant()
Reimplements the :meth:`QAbstractItemModel.headerData` method. :param section: Section. :type section: int :param orientation: Orientation. ( Qt.Orientation ) :param role: Role. :type role: int :return: Header data. :rtype: QVariant
def intersection(self, *args): '''Returns the intersection of the values whose keys are in *args. If *args is blank, returns the intersection of all values. ''' values = self.values() if args: values = [val for key,val in self.items() if key in args] return set(reduce(set.intersection, values))
Returns the intersection of the values whose keys are in *args. If *args is blank, returns the intersection of all values.
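The core idiom, shown standalone on a plain dict of sets; note that on Python 3 the method above would also need functools.reduce imported.

from functools import reduce

groups = {'a': {1, 2, 3}, 'b': {2, 3, 4}, 'c': {3, 4, 5}}

# Intersection of every value.
print(reduce(set.intersection, groups.values()))  # {3}

# Intersection restricted to selected keys.
keys = ('a', 'b')
print(reduce(set.intersection, [v for k, v in groups.items() if k in keys]))  # {2, 3}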
def total_supply(self, block_identifier='latest'): """ Return the total supply of the token at the given block identifier. """ return self.proxy.contract.functions.totalSupply().call(block_identifier=block_identifier)
Return the total supply of the token at the given block identifier.
def id_generator(size=15, random_state=None): """Helper function to generate random div ids. This is useful for embedding HTML into ipython notebooks.""" chars = list(string.ascii_uppercase + string.digits) return ''.join(random_state.choice(chars, size, replace=True))
Helper function to generate random div ids. This is useful for embedding HTML into ipython notebooks.
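The choice(chars, size, replace=True) signature is NumPy's, so random_state is expected to be a numpy.random.RandomState rather than the stdlib random module. A usage sketch:

import string
import numpy as np

random_state = np.random.RandomState(0)
chars = list(string.ascii_uppercase + string.digits)
div_id = ''.join(random_state.choice(chars, 15, replace=True))
print(div_id)  # a 15-character id, deterministic for seed 0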
def encode(data, scheme=None, size=None): """ Encodes `data` in a DataMatrix image. For now bpp is the libdmtx default which is 24 Args: data: bytes instance scheme: encoding scheme - one of `ENCODING_SCHEME_NAMES`, or `None`. If `None`, defaults to 'Ascii'. size: image dimensions - one of `ENCODING_SIZE_NAMES`, or `None`. If `None`, defaults to 'ShapeAuto'. Returns: Encoded: with properties `(width, height, bpp, pixels)`. You can use that result to build a PIL image: Image.frombytes('RGB', (width, height), pixels) """ size = size if size else 'ShapeAuto' size_name = '{0}{1}'.format(ENCODING_SIZE_PREFIX, size) if not hasattr(DmtxSymbolSize, size_name): raise PyLibDMTXError( 'Invalid size [{0}]: should be one of {1}'.format( size, ENCODING_SIZE_NAMES ) ) size = getattr(DmtxSymbolSize, size_name) scheme = scheme if scheme else 'Ascii' scheme_name = '{0}{1}'.format( ENCODING_SCHEME_PREFIX, scheme.capitalize() ) if not hasattr(DmtxScheme, scheme_name): raise PyLibDMTXError( 'Invalid scheme [{0}]: should be one of {1}'.format( scheme, ENCODING_SCHEME_NAMES ) ) scheme = getattr(DmtxScheme, scheme_name) with _encoder() as encoder: dmtxEncodeSetProp(encoder, DmtxProperty.DmtxPropScheme, scheme) dmtxEncodeSetProp(encoder, DmtxProperty.DmtxPropSizeRequest, size) if dmtxEncodeDataMatrix(encoder, len(data), cast(data, c_ubyte_p)) == 0: raise PyLibDMTXError( 'Could not encode data, possibly because the image is not ' 'large enough to contain the data' ) w, h, bpp = map( partial(dmtxImageGetProp, encoder[0].image), ( DmtxProperty.DmtxPropWidth, DmtxProperty.DmtxPropHeight, DmtxProperty.DmtxPropBitsPerPixel ) ) size = w * h * bpp // 8 pixels = cast( encoder[0].image[0].pxl, ctypes.POINTER(ctypes.c_ubyte * size) ) return Encoded( width=w, height=h, bpp=bpp, pixels=ctypes.string_at(pixels, size) )
Encodes `data` in a DataMatrix image. For now bpp is the libdmtx default which is 24 Args: data: bytes instance scheme: encoding scheme - one of `ENCODING_SCHEME_NAMES`, or `None`. If `None`, defaults to 'Ascii'. size: image dimensions - one of `ENCODING_SIZE_NAMES`, or `None`. If `None`, defaults to 'ShapeAuto'. Returns: Encoded: with properties `(width, height, bpp, pixels)`. You can use that result to build a PIL image: Image.frombytes('RGB', (width, height), pixels)
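Assuming this is pylibdmtx's public encode() and its Encoded result, the PIL recipe from the docstring looks like this in practice (Pillow required):

from PIL import Image
from pylibdmtx.pylibdmtx import encode

encoded = encode(b'hello world')
image = Image.frombytes('RGB', (encoded.width, encoded.height), encoded.pixels)
image.save('datamatrix.png')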
def _calc_min_size(self, conv_layers): """Calculates the minimum size of the input layer. Given a set of convolutional layers, calculate the minimum value of the `input_height` and `input_width`, i.e. such that the output has size 1x1. Assumes snt.VALID padding. Args: conv_layers: List of tuples `(output_channels, (kernel_size, stride), (pooling_size, pooling_stride))` Returns: Minimum value of input height and width. """ input_size = 1 for _, conv_params, max_pooling in reversed(conv_layers): if max_pooling is not None: kernel_size, stride = max_pooling input_size = input_size * stride + (kernel_size - stride) if conv_params is not None: kernel_size, stride = conv_params input_size = input_size * stride + (kernel_size - stride) return input_size
Calculates the minimum size of the input layer. Given a set of convolutional layers, calculate the minimum value of the `input_height` and `input_width`, i.e. such that the output has size 1x1. Assumes snt.VALID padding. Args: conv_layers: List of tuples `(output_channels, (kernel_size, stride), (pooling_size, pooling_stride))` Returns: Minimum value of input height and width.
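A standalone rerun of the arithmetic: starting from a 1x1 output and walking the layers in reverse, each kernel/stride pair inflates the required input via input_size * stride + (kernel_size - stride). The layer spec below is made up.

def calc_min_size(conv_layers):
    input_size = 1
    for _, conv_params, max_pooling in reversed(conv_layers):
        if max_pooling is not None:
            kernel_size, stride = max_pooling
            input_size = input_size * stride + (kernel_size - stride)
        if conv_params is not None:
            kernel_size, stride = conv_params
            input_size = input_size * stride + (kernel_size - stride)
    return input_size

# Two blocks of conv 5x5 stride 1 followed by 2x2 max-pool stride 2.
layers = [(16, (5, 1), (2, 2)), (32, (5, 1), (2, 2))]
print(calc_min_size(layers))
# Block 2: pool 1*2+(2-2)=2, conv 2*1+(5-1)=6; block 1: pool 6*2+0=12, conv 12*1+4=16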
def getnames(): """ get mail names """ namestring = "" addmore = 1 while addmore: scientist = input("Enter name - <Return> when done ") if scientist != "": namestring = namestring + ":" + scientist else: namestring = namestring[1:] addmore = 0 return namestring
get mail names
def substitute_selected_state(state, as_template=False, keep_name=False): """ Substitute the selected state with the handed state :param rafcon.core.states.state.State state: A state of any functional type that derives from State :param bool as_template: The flag determines if a handed the state of type LibraryState is insert as template :return: """ # print("substitute_selected_state", state, as_template) assert isinstance(state, State) from rafcon.core.states.barrier_concurrency_state import DeciderState if isinstance(state, DeciderState): raise ValueError("State of type DeciderState can not be substituted.") smm_m = rafcon.gui.singleton.state_machine_manager_model if not smm_m.selected_state_machine_id: logger.error("Selected state machine can not be found, please select a state within a state machine first.") return False selection = smm_m.state_machines[smm_m.selected_state_machine_id].selection selected_state_m = selection.get_selected_state() if len(selection.states) != 1: logger.error("Please select exactly one state for the substitution") return False if is_selection_inside_of_library_state(selected_elements=[selected_state_m]): logger.warning("Substitute is not performed because target state is inside of a library state.") return gui_helper_state.substitute_state_as(selected_state_m, state, as_template, keep_name) return True
Substitute the selected state with the handed state. :param rafcon.core.states.state.State state: A state of any functional type that derives from State :param bool as_template: Determines whether a handed state of type LibraryState is inserted as a template :return:
def _cryptodome_encrypt(cipher_factory, plaintext, key, iv): """Use a Pycryptodome cipher factory to encrypt data. :param cipher_factory: Factory callable that builds a Pycryptodome Cipher instance based on the key and IV :type cipher_factory: callable :param bytes plaintext: Plaintext data to encrypt :param bytes key: Encryption key :param bytes IV: Initialization vector :returns: Encrypted ciphertext :rtype: bytes """ encryptor = cipher_factory(key, iv) return encryptor.encrypt(plaintext)
Use a Pycryptodome cipher factory to encrypt data. :param cipher_factory: Factory callable that builds a Pycryptodome Cipher instance based on the key and IV :type cipher_factory: callable :param bytes plaintext: Plaintext data to encrypt :param bytes key: Encryption key :param bytes IV: Initialization vector :returns: Encrypted ciphertext :rtype: bytes
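One plausible cipher_factory for the helper above, using Pycryptodome's AES in CFB mode (a stream-style mode, so arbitrary-length plaintext needs no padding); the key and IV are throwaway test values.

from Crypto.Cipher import AES

def aes_cfb_factory(key, iv):
    # Build a fresh cipher object per call, as the helper expects.
    return AES.new(key, AES.MODE_CFB, iv=iv)

key = b'0' * 32  # 32 bytes -> AES-256
iv = b'1' * 16   # AES block size
ciphertext = _cryptodome_encrypt(aes_cfb_factory, b'attack at dawn', key, iv)
print(len(ciphertext))  # 14 -- same length as the plaintext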
def export_organizations(self, outfile): """Export organizations information to a file. The method exports information related to organizations, to the given 'outfile' output file. :param outfile: destination file object """ exporter = SortingHatOrganizationsExporter(self.db) dump = exporter.export() try: outfile.write(dump) outfile.write('\n') except IOError as e: raise RuntimeError(str(e)) return CMD_SUCCESS
Export organizations information to a file. The method exports information related to organizations, to the given 'outfile' output file. :param outfile: destination file object
def switch_region(request, region_name, redirect_field_name=auth.REDIRECT_FIELD_NAME): """Switches the user's region for all services except Identity service. The region will be switched if the given region is one of the regions available for the scoped project. Otherwise the region is not switched. """ if region_name in request.user.available_services_regions: request.session['services_region'] = region_name LOG.debug('Switching services region to %s for user "%s".', region_name, request.user.username) redirect_to = request.GET.get(redirect_field_name, '') if not is_safe_url(url=redirect_to, host=request.get_host()): redirect_to = settings.LOGIN_REDIRECT_URL response = shortcuts.redirect(redirect_to) utils.set_response_cookie(response, 'services_region', request.session['services_region']) return response
Switches the user's region for all services except Identity service. The region will be switched if the given region is one of the regions available for the scoped project. Otherwise the region is not switched.
def drawpoint(self, x, y, colour = None): """ Most elementary drawing, single pixel, used mainly for testing purposes. Coordinates are those of your initial image ! """ self.checkforpilimage() colour = self.defaultcolour(colour) self.changecolourmode(colour) self.makedraw() (pilx, pily) = self.pilcoords((x,y)) self.draw.point((pilx, pily), fill = colour)
Most elementary drawing, single pixel, used mainly for testing purposes. Coordinates are those of your initial image !
def _run_snpeff(snp_in, out_format, data): """Run effects prediction with snpEff, skipping if snpEff database not present. """ snpeff_db, datadir = get_db(data) if not snpeff_db: return None, None assert os.path.exists(os.path.join(datadir, snpeff_db)), \ "Did not find %s snpEff genome data in %s" % (snpeff_db, datadir) ext = utils.splitext_plus(snp_in)[1] if out_format == "vcf" else ".tsv" out_file = "%s-effects%s" % (utils.splitext_plus(snp_in)[0], ext) stats_file = "%s-stats.html" % utils.splitext_plus(out_file)[0] csv_file = "%s-stats.csv" % utils.splitext_plus(out_file)[0] if not utils.file_exists(out_file): config_args = " ".join(_snpeff_args_from_config(data)) if ext.endswith(".gz"): bgzip_cmd = "| %s -c" % tools.get_bgzip_cmd(data["config"]) else: bgzip_cmd = "" with file_transaction(data, out_file) as tx_out_file: snpeff_cmd = _get_snpeff_cmd("eff", datadir, data, tx_out_file) cmd = ("{snpeff_cmd} {config_args} -noLog -i vcf -o {out_format} " "-csvStats {csv_file} -s {stats_file} {snpeff_db} {snp_in} {bgzip_cmd} > {tx_out_file}") do.run(cmd.format(**locals()), "snpEff effects", data) if ext.endswith(".gz"): out_file = vcfutils.bgzip_and_index(out_file, data["config"]) return out_file, [stats_file, csv_file]
Run effects prediction with snpEff, skipping if snpEff database not present.
def search(self, query, limit=None): """Use reddit's search function. Returns :class:`things.Listing` object. URL: ``http://www.reddit.com/search/?q=<query>&limit=<limit>`` :param query: query string :param limit: max number of results to get """ return self._limit_get('search', params=dict(q=query), limit=limit)
Use reddit's search function. Returns :class:`things.Listing` object. URL: ``http://www.reddit.com/search/?q=<query>&limit=<limit>`` :param query: query string :param limit: max number of results to get
def make_store(name, min_length=4, **kwargs): """\ Creates a store with a reasonable keygen. .. deprecated:: 2.0.0 Instantiate stores directly e.g. ``shorten.MemoryStore(min_length=4)`` """ if name not in stores: raise ValueError('valid stores are {0}'.format(', '.join(stores))) if name == 'memcache': store = MemcacheStore elif name == 'memory': store = MemoryStore elif name == 'redis': store = RedisStore return store(min_length=min_length, **kwargs)
Creates a store with a reasonable keygen. .. deprecated:: 2.0.0 Instantiate stores directly, e.g. ``shorten.MemoryStore(min_length=4)``
def register_memory(): """Register an approximation of memory used by FTP server process and all of its children. """ # XXX How to get a reliable representation of memory being used is # not clear. (rss - shared) seems kind of ok but we might also use # the private working set via get_memory_maps().private*. def get_mem(proc): if os.name == 'posix': mem = proc.memory_info_ex() counter = mem.rss if 'shared' in mem._fields: counter -= mem.shared return counter else: # TODO figure out what to do on Windows return proc.get_memory_info().rss if SERVER_PROC is not None: mem = get_mem(SERVER_PROC) for child in SERVER_PROC.children(): mem += get_mem(child) server_memory.append(bytes2human(mem))
Register an approximation of the memory used by the FTP server process and all of its children.
def get_scenario(scenario_id,**kwargs): """ Get the specified scenario """ user_id = kwargs.get('user_id') scen_i = _get_scenario(scenario_id, user_id) scen_j = JSONObject(scen_i) rscen_rs = db.DBSession.query(ResourceScenario).filter(ResourceScenario.scenario_id==scenario_id).options(joinedload_all('dataset.metadata')).all() #lazy load resource attributes and attributes for rs in rscen_rs: rs.resourceattr rs.resourceattr.attr rgi_rs = db.DBSession.query(ResourceGroupItem).filter(ResourceGroupItem.scenario_id==scenario_id).all() scen_j.resourcescenarios = [] for rs in rscen_rs: rs_j = JSONObject(rs, extras={'resourceattr':JSONObject(rs.resourceattr)}) if rs.dataset.check_read_permission(user_id, do_raise=False) is False: rs_j.dataset['value'] = None rs_j.dataset.metadata = JSONObject({}) scen_j.resourcescenarios.append(rs_j) scen_j.resourcegroupitems =[JSONObject(r) for r in rgi_rs] return scen_j
Get the specified scenario
def load_services(self, services=settings.TH_SERVICES): """ get the service from the settings """ kwargs = {} for class_path in services: module_name, class_name = class_path.rsplit('.', 1) klass = import_from_path(class_path) service = klass(None, **kwargs) self.register(class_name, service)
Load and register each service listed in the settings.
def get_activity_mdata(): """Return default mdata map for Activity""" return { 'courses': { 'element_label': { 'text': 'courses', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'instructions': { 'text': 'accepts an osid.id.Id[] object', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'required': False, 'read_only': False, 'linked': False, 'array': True, 'default_id_values': [], 'syntax': 'ID', 'id_set': [], }, 'assessments': { 'element_label': { 'text': 'assessments', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'instructions': { 'text': 'accepts an osid.id.Id[] object', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'required': False, 'read_only': False, 'linked': False, 'array': True, 'default_id_values': [], 'syntax': 'ID', 'id_set': [], }, 'objective': { 'element_label': { 'text': 'objective', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'instructions': { 'text': 'accepts an osid.id.Id object', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_id_values': [''], 'syntax': 'ID', 'id_set': [], }, 'assets': { 'element_label': { 'text': 'assets', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'instructions': { 'text': 'accepts an osid.id.Id[] object', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'required': False, 'read_only': False, 'linked': False, 'array': True, 'default_id_values': [], 'syntax': 'ID', 'id_set': [], }, }
Return default mdata map for Activity
def _filter_choosers_alts(self, choosers, alternatives): """ Apply filters to the choosers and alts tables. """ return ( util.apply_filter_query( choosers, self.choosers_predict_filters), util.apply_filter_query( alternatives, self.alts_predict_filters))
Apply filters to the choosers and alts tables.
def require_meta_and_content(self, content_handler, params, **kwargs): """Require 'meta' and 'content' dictionaries using proper hander. Args: content_handler (callable): function that accepts ``params, meta, **kwargs`` argument and returns dictionary for ``content`` response section params (dict): dictionary of parsed resource parameters kwargs (dict): dictionary of values created from resource url template Returns: tuple (meta, content): two-tuple with dictionaries of ``meta`` and ``content`` response sections """ meta = { 'params': params } content = content_handler(params, meta, **kwargs) meta['params'] = params return meta, content
Require 'meta' and 'content' dictionaries using proper hander. Args: content_handler (callable): function that accepts ``params, meta, **kwargs`` argument and returns dictionary for ``content`` response section params (dict): dictionary of parsed resource parameters kwargs (dict): dictionary of values created from resource url template Returns: tuple (meta, content): two-tuple with dictionaries of ``meta`` and ``content`` response sections
def wrapped_request(self, request, *args, **kwargs): """Create and send a request to the server. This method implements a very small subset of the options possible to send an request. It is provided as a shortcut to sending a simple wrapped request. Parameters ---------- request : str The request to call. *args : list of objects Arguments to pass on to the request. Keyword Arguments ----------------- timeout : float or None, optional Timeout after this amount of seconds (keyword argument). mid : None or int, optional Message identifier to use for the request message. If None, use either auto-incrementing value or no mid depending on the KATCP protocol version (mid's were only introduced with KATCP v5) and the value of the `use_mid` argument. Defaults to None. use_mid : bool Use a mid for the request if True. Returns ------- future object that resolves with the :meth:`katcp.client.DeviceClient.future_request` response wrapped in self.reply_wrapper Example ------- :: wrapped_reply = yield ic.simple_request('help', 'sensor-list') """ f = tornado_Future() try: use_mid = kwargs.get('use_mid') timeout = kwargs.get('timeout') mid = kwargs.get('mid') msg = Message.request(request, *args, mid=mid) except Exception: f.set_exc_info(sys.exc_info()) return f return transform_future(self.reply_wrapper, self.katcp_client.future_request(msg, timeout, use_mid))
Create and send a request to the server. This method implements a very small subset of the options possible to send an request. It is provided as a shortcut to sending a simple wrapped request. Parameters ---------- request : str The request to call. *args : list of objects Arguments to pass on to the request. Keyword Arguments ----------------- timeout : float or None, optional Timeout after this amount of seconds (keyword argument). mid : None or int, optional Message identifier to use for the request message. If None, use either auto-incrementing value or no mid depending on the KATCP protocol version (mid's were only introduced with KATCP v5) and the value of the `use_mid` argument. Defaults to None. use_mid : bool Use a mid for the request if True. Returns ------- future object that resolves with the :meth:`katcp.client.DeviceClient.future_request` response wrapped in self.reply_wrapper Example ------- :: wrapped_reply = yield ic.simple_request('help', 'sensor-list')
def compute_csets_TRAM( connectivity, state_counts, count_matrices, equilibrium_state_counts=None, ttrajs=None, dtrajs=None, bias_trajs=None, nn=None, factor=1.0, callback=None): r""" Computes the largest connected sets in the produce space of Markov state and thermodynamic states for TRAM data. Parameters ---------- connectivity : string one of None, 'reversible_pathways', 'post_hoc_RE' or 'BAR_variance', 'neighbors', 'summed_count_matrix' or None. Selects the algorithm for measuring overlap between thermodynamic and Markov states. * 'reversible_pathways' : requires that every state in the connected set can be reached by following a pathway of reversible transitions. A reversible transition between two Markov states (within the same thermodynamic state k) is a pair of Markov states that belong to the same strongly connected component of the count matrix (from thermodynamic state k). A pathway of reversible transitions is a list of reversible transitions [(i_1, i_2), (i_2, i_3),..., (i_(N-2), i_(N-1)), (i_(N-1), i_N)]. The thermodynamic state where the reversible transitions happen, is ignored in constructing the reversible pathways. This is equivalent to assuming that two ensembles overlap at some Markov state whenever there exist frames from both ensembles in that Markov state. * 'largest' : alias for reversible_pathways * 'post_hoc_RE' : similar to 'reversible_pathways' but with a more strict requirement for the overlap between thermodynamic states. It is required that every state in the connected set can be reached by following a pathway of reversible transitions or jumping between overlapping thermodynamic states while staying in the same Markov state. A reversible transition between two Markov states (within the same thermodynamic state k) is a pair of Markov states that belong to the same strongly connected component of the count matrix (from thermodynamic state k). Two thermodynamic states k and l are defined to overlap at Markov state n if a replica exchange simulation [2]_ restricted to state n would show at least one transition from k to l or one transition from from l to k. The expected number of replica exchanges is estimated from the simulation data. The minimal number required of replica exchanges per Markov state can be increased by decreasing `connectivity_factor`. * 'BAR_variance' : like 'post_hoc_RE' but with a different condition to define the thermodynamic overlap based on the variance of the BAR estimator [3]_. Two thermodynamic states k and l are defined to overlap at Markov state n if the variance of the free energy difference Delta f_{kl} computed with BAR (and restricted to conformations form Markov state n) is less or equal than one. The minimally required variance can be controlled with `connectivity_factor`. * 'neighbors' : like 'post_hoc_RE' or 'BAR_variance' but assume a overlap between "neighboring" thermodynamic states. It is assumed that the data comes from an Umbrella sampling simulation and the number of the thermodynamic state matches the position of the Umbrella along the order parameter. The overlap of thermodynamic states k and l within Markov state n is set according to the value of nn; if there are samples in both product-space states (k,n) and (l,n) and |l-n|<=nn, the states are overlapping. * 'summed_count_matrix' : all thermodynamic states are assumed to overlap. The connected set is then computed by summing the count matrices over all thermodynamic states and taking it's largest strongly connected set. Not recommended! 
* None : assume that everything is connected. For debugging. state_counts : numpy.ndarray((T, M), dtype=numpy.intc) Number of visits to the combinations of thermodynamic state t and Markov state m count_matrices : numpy.ndarray((T, M, M), dtype=numpy.intc) Count matrices for all T thermodynamic states. equilibrium_state_counts : numpy.dnarray((T, M)), optional Number of visits to the combinations of thermodynamic state t and Markov state m in the equilibrium data (for use with TRAMMBAR). ttrajs : list of numpy.ndarray(X_i, dtype=numpy.intc), optional List of generating thermodynamic state trajectories. dtrajs : list of numpy.ndarray(X_i, dtype=numpy.intc), optional List of configurational state trajectories (disctrajs). bias_trajs : list of numpy.ndarray((X_i, T), dtype=numpy.float64), optional List of bias energy trajectories. The last three parameters are only required for connectivity = 'post_hoc_RE' or connectivity = 'BAR_variance'. nn : int, optional Number of neighbors that are assumed to overlap when connectivity='neighbors' factor : int, default=1.0 scaling factor used for connectivity = 'post_hoc_RE' or 'BAR_variance'. Values greater than 1.0 weaken the connectivity conditions. For 'post_hoc_RE' this multiplies the number of hypothetically observed transitions. For 'BAR_variance' this scales the threshold for the minimal allowed variance of free energy differences. Returns ------- csets, projected_cset csets : list of ndarrays((X_i,), dtype=int) List indexed by thermodynamic state. Every element csets[k] is the largest connected set at thermodynamic state k. projected_cset : ndarray(M, dtype=int) The overall connected set. This is the union of the individual connected sets of the thermodynamic states. References: ----------- [1]_ Hukushima et al, Exchange Monte Carlo method and application to spin glass simulations, J. Phys. Soc. Jan. 65, 1604 (1996) [2]_ Shirts and Chodera, Statistically optimal analysis of samples from multiple equilibrium states, J. Chem. Phys. 129, 124105 (2008) """ return _compute_csets( connectivity, state_counts, count_matrices, ttrajs, dtrajs, bias_trajs, nn=nn, equilibrium_state_counts=equilibrium_state_counts, factor=factor, callback=callback)
r""" Computes the largest connected sets in the produce space of Markov state and thermodynamic states for TRAM data. Parameters ---------- connectivity : string one of None, 'reversible_pathways', 'post_hoc_RE' or 'BAR_variance', 'neighbors', 'summed_count_matrix' or None. Selects the algorithm for measuring overlap between thermodynamic and Markov states. * 'reversible_pathways' : requires that every state in the connected set can be reached by following a pathway of reversible transitions. A reversible transition between two Markov states (within the same thermodynamic state k) is a pair of Markov states that belong to the same strongly connected component of the count matrix (from thermodynamic state k). A pathway of reversible transitions is a list of reversible transitions [(i_1, i_2), (i_2, i_3),..., (i_(N-2), i_(N-1)), (i_(N-1), i_N)]. The thermodynamic state where the reversible transitions happen, is ignored in constructing the reversible pathways. This is equivalent to assuming that two ensembles overlap at some Markov state whenever there exist frames from both ensembles in that Markov state. * 'largest' : alias for reversible_pathways * 'post_hoc_RE' : similar to 'reversible_pathways' but with a more strict requirement for the overlap between thermodynamic states. It is required that every state in the connected set can be reached by following a pathway of reversible transitions or jumping between overlapping thermodynamic states while staying in the same Markov state. A reversible transition between two Markov states (within the same thermodynamic state k) is a pair of Markov states that belong to the same strongly connected component of the count matrix (from thermodynamic state k). Two thermodynamic states k and l are defined to overlap at Markov state n if a replica exchange simulation [2]_ restricted to state n would show at least one transition from k to l or one transition from from l to k. The expected number of replica exchanges is estimated from the simulation data. The minimal number required of replica exchanges per Markov state can be increased by decreasing `connectivity_factor`. * 'BAR_variance' : like 'post_hoc_RE' but with a different condition to define the thermodynamic overlap based on the variance of the BAR estimator [3]_. Two thermodynamic states k and l are defined to overlap at Markov state n if the variance of the free energy difference Delta f_{kl} computed with BAR (and restricted to conformations form Markov state n) is less or equal than one. The minimally required variance can be controlled with `connectivity_factor`. * 'neighbors' : like 'post_hoc_RE' or 'BAR_variance' but assume a overlap between "neighboring" thermodynamic states. It is assumed that the data comes from an Umbrella sampling simulation and the number of the thermodynamic state matches the position of the Umbrella along the order parameter. The overlap of thermodynamic states k and l within Markov state n is set according to the value of nn; if there are samples in both product-space states (k,n) and (l,n) and |l-n|<=nn, the states are overlapping. * 'summed_count_matrix' : all thermodynamic states are assumed to overlap. The connected set is then computed by summing the count matrices over all thermodynamic states and taking it's largest strongly connected set. Not recommended! * None : assume that everything is connected. For debugging. 
state_counts : numpy.ndarray((T, M), dtype=numpy.intc) Number of visits to the combinations of thermodynamic state t and Markov state m. count_matrices : numpy.ndarray((T, M, M), dtype=numpy.intc) Count matrices for all T thermodynamic states. equilibrium_state_counts : numpy.ndarray((T, M)), optional Number of visits to the combinations of thermodynamic state t and Markov state m in the equilibrium data (for use with TRAMMBAR). ttrajs : list of numpy.ndarray(X_i, dtype=numpy.intc), optional List of generating thermodynamic state trajectories. dtrajs : list of numpy.ndarray(X_i, dtype=numpy.intc), optional List of configurational state trajectories (disctrajs). bias_trajs : list of numpy.ndarray((X_i, T), dtype=numpy.float64), optional List of bias energy trajectories. The last three parameters are only required for connectivity = 'post_hoc_RE' or connectivity = 'BAR_variance'. nn : int, optional Number of neighbors that are assumed to overlap when connectivity='neighbors'. factor : float, default=1.0 Scaling factor used for connectivity = 'post_hoc_RE' or 'BAR_variance'. Values greater than 1.0 weaken the connectivity conditions. For 'post_hoc_RE' this multiplies the number of hypothetically observed transitions. For 'BAR_variance' this scales the threshold for the minimal allowed variance of free energy differences. Returns ------- csets, projected_cset csets : list of ndarrays((X_i,), dtype=int) List indexed by thermodynamic state. Every element csets[k] is the largest connected set at thermodynamic state k. projected_cset : ndarray(M, dtype=int) The overall connected set. This is the union of the individual connected sets of the thermodynamic states. References ---------- [1]_ Hukushima et al, Exchange Monte Carlo method and application to spin glass simulations, J. Phys. Soc. Jpn. 65, 1604 (1996) [2]_ Shirts and Chodera, Statistically optimal analysis of samples from multiple equilibrium states, J. Chem. Phys. 129, 124105 (2008)
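To make the 'summed_count_matrix' option concrete, here is a minimal, self-contained sketch (not the library routine, which delegates to _compute_csets): sum toy count matrices over the thermodynamic states and keep the largest strongly connected component. The count matrices below are invented for illustration.

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components

# Toy data: T=2 thermodynamic states, M=3 Markov states.
count_matrices = np.array([
    [[5, 1, 0], [1, 4, 0], [0, 0, 0]],   # counts sampled at thermodynamic state 0
    [[0, 0, 0], [0, 3, 2], [0, 2, 6]],   # counts sampled at thermodynamic state 1
], dtype=np.intc)

# Sum over all thermodynamic states and find the largest strongly connected set.
C_sum = count_matrices.sum(axis=0)
n_comp, labels = connected_components(csr_matrix(C_sum > 0), directed=True, connection='strong')
largest = np.argmax(np.bincount(labels))
projected_cset = np.flatnonzero(labels == largest)
print(projected_cset)  # [0 1 2]: every Markov state is reachable through state 1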
def update_member_names(oldasndict, pydr_input): """ Update names in a member dictionary. Given an association dictionary with rootnames and a list of full file names, it will update the names in the member dictionary to contain '_*' extension. For example a rootname of 'u9600201m' will be replaced by 'u9600201m_c0h' making sure that an MEF file is passed as an input and not the corresponding GEIS file. """ omembers = oldasndict['members'].copy() nmembers = {} translated_names = [f.split('.fits')[0] for f in pydr_input] newkeys = [fileutil.buildNewRootname(file) for file in pydr_input] keys_map = list(zip(newkeys, pydr_input)) for okey, oval in list(omembers.items()): if okey in newkeys: nkey = pydr_input[newkeys.index(okey)] nmembers[nkey.split('.fits')[0]] = oval oldasndict.pop('members') # replace should always be True to cover the case when flt files were removed # and the case when names were translated oldasndict.update(members=nmembers, replace=True) oldasndict['order'] = translated_names return oldasndict
Update names in a member dictionary. Given an association dictionary with rootnames and a list of full file names, it will update the names in the member dictionary to contain '_*' extension. For example a rootname of 'u9600201m' will be replaced by 'u9600201m_c0h' making sure that an MEF file is passed as an input and not the corresponding GEIS file.
def project(self, x, vector): '''Project a vector (gradient or direction) on the active constraints. Arguments: | ``x`` -- The unknowns. | ``vector`` -- A numpy array with a direction or a gradient. The return value is a gradient or direction, where the components that point away from the constraints are projected out. In case of half-open constraints, the projection is only active if the vector points into the infeasible region. ''' scale = np.linalg.norm(vector) if scale == 0.0: return vector self.lock[:] = False normals, signs = self._compute_equations(x)[::3] if len(normals) == 0: return vector vector = vector/scale mask = signs == 0 result = vector.copy() changed = True counter = 0 while changed: changed = False y = np.dot(normals, result) for i, sign in enumerate(signs): if sign != 0: if sign*y[i] < -self.threshold: mask[i] = True changed = True elif mask[i] and np.dot(normals[i], result-vector) < 0: mask[i] = False changed = True if mask.any(): normals_select = normals[mask] y = np.dot(normals_select, vector) U, S, Vt = np.linalg.svd(normals_select, full_matrices=False) if S.min() == 0.0: Sinv = S/(S**2+self.rcond1) else: Sinv = 1.0/S result = vector - np.dot(Vt.transpose(), np.dot(U.transpose(), y)*Sinv) else: result = vector.copy() if counter > self.max_iter: raise ConstraintError('Exceeded maximum number of shake iterations.') counter += 1 return result*scale
Project a vector (gradient or direction) on the active constraints. Arguments: | ``x`` -- The unknowns. | ``vector`` -- A numpy array with a direction or a gradient. The return value is a gradient or direction, where the components that point away from the constraints are projected out. In case of half-open constraints, the projection is only active if the vector points into the infeasible region.
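The heart of the method above is removing the components of the vector that point along the active constraint normals. A minimal sketch of just that step (made-up normal, no sign handling or iteration), mirroring the SVD-based projection used in the code:

import numpy as np

def project_out(normals, vector, rcond=1e-10):
    """Remove from `vector` the components that lie along the rows of `normals`."""
    y = normals @ vector
    U, S, Vt = np.linalg.svd(normals, full_matrices=False)
    Sinv = S / (S**2 + rcond) if S.min() == 0.0 else 1.0 / S
    return vector - Vt.T @ ((U.T @ y) * Sinv)

normals = np.array([[1.0, 0.0, 0.0]])   # hypothetical single active constraint normal
gradient = np.array([0.3, -1.2, 0.5])
print(project_out(normals, gradient))   # [ 0.  -1.2  0.5]: the constrained component is gone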
def optimize_seq_and_branch_len(self,reuse_branch_len=True, prune_short=True, marginal_sequences=False, branch_length_mode='joint', max_iter=5, infer_gtr=False, **kwargs): """ Iteratively set branch lengths and reconstruct ancestral sequences until the values of either former or latter do not change. The algorithm assumes knowing only the topology of the tree, and requires that sequences are assigned to all leaves of the tree. The first step is to pre-reconstruct ancestral states using Fitch reconstruction algorithm or ML using existing branch length estimates. Then, optimize branch lengths and re-do reconstruction until convergence using ML method. Parameters ----------- reuse_branch_len : bool If True, rely on the initial branch lengths, and start with the maximum-likelihood ancestral sequence inference using existing branch lengths. Otherwise, do initial reconstruction of ancestral states with Fitch algorithm, which uses only the tree topology. prune_short : bool If True, the branches with zero optimal length will be pruned from the tree, creating polytomies. The polytomies could be further processed using :py:meth:`treetime.TreeTime.resolve_polytomies` from the TreeTime class. marginal_sequences : bool Assign sequences to their marginally most likely value, rather than the values that are jointly most likely across all nodes. branch_length_mode : str 'joint', 'marginal', or 'input'. Branch lengths are left unchanged in case of 'input'. 'joint' and 'marginal' cause branch length optimization while setting sequences to the ML value or tracing over all possible internal sequence states. max_iter : int Maximal number of times sequence and branch length iteration are optimized infer_gtr : bool Infer a GTR model from the observed substitutions. """ if branch_length_mode=='marginal': marginal_sequences = True self.logger("TreeAnc.optimize_sequences_and_branch_length: sequences...", 1) if reuse_branch_len: N_diff = self.reconstruct_anc(method='probabilistic', infer_gtr=infer_gtr, marginal=marginal_sequences, **kwargs) self.optimize_branch_len(verbose=0, store_old=False, mode=branch_length_mode) else: N_diff = self.reconstruct_anc(method='fitch', infer_gtr=infer_gtr, **kwargs) self.optimize_branch_len(verbose=0, store_old=False, marginal=False) n = 0 while n<max_iter: n += 1 if prune_short: self.prune_short_branches() N_diff = self.reconstruct_anc(method='probabilistic', infer_gtr=False, marginal=marginal_sequences, **kwargs) self.logger("TreeAnc.optimize_sequences_and_branch_length: Iteration %d." " #Nuc changed since prev reconstructions: %d" %(n, N_diff), 2) if N_diff < 1: break self.optimize_branch_len(verbose=0, store_old=False, mode=branch_length_mode) self.tree.unconstrained_sequence_LH = (self.tree.sequence_LH*self.multiplicity).sum() self._prepare_nodes() # fix dist2root and up-links after reconstruction self.logger("TreeAnc.optimize_sequences_and_branch_length: Unconstrained sequence LH:%f" % self.tree.unconstrained_sequence_LH , 2) return ttconf.SUCCESS
Iteratively set branch lengths and reconstruct ancestral sequences until the values of either former or latter do not change. The algorithm assumes knowing only the topology of the tree, and requires that sequences are assigned to all leaves of the tree. The first step is to pre-reconstruct ancestral states using Fitch reconstruction algorithm or ML using existing branch length estimates. Then, optimize branch lengths and re-do reconstruction until convergence using ML method. Parameters ----------- reuse_branch_len : bool If True, rely on the initial branch lengths, and start with the maximum-likelihood ancestral sequence inference using existing branch lengths. Otherwise, do initial reconstruction of ancestral states with Fitch algorithm, which uses only the tree topology. prune_short : bool If True, the branches with zero optimal length will be pruned from the tree, creating polytomies. The polytomies could be further processed using :py:meth:`treetime.TreeTime.resolve_polytomies` from the TreeTime class. marginal_sequences : bool Assign sequences to their marginally most likely value, rather than the values that are jointly most likely across all nodes. branch_length_mode : str 'joint', 'marginal', or 'input'. Branch lengths are left unchanged in case of 'input'. 'joint' and 'marginal' cause branch length optimization while setting sequences to the ML value or tracing over all possible internal sequence states. max_iter : int Maximal number of times sequence and branch length iteration are optimized infer_gtr : bool Infer a GTR model from the observed substitutions.
def get_xy_environment(self, xy): '''Get manager address for the environment which should have the agent with given *xy* coordinate, or None if no such environment is in this multi-environment. ''' x = xy[0] y = xy[1] for origin, addr in self._slave_origins: ox = origin[0] oy = origin[1] if ox <= x < ox + self.gs[0] and oy <= y < oy + self.gs[1]: return addr return None
Get manager address for the environment which should have the agent with given *xy* coordinate, or None if no such environment is in this multi-environment.
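A small stand-alone sketch of the lookup performed above, with invented origins, grid size and addresses; each slave environment owns the grid cell anchored at its origin:

grid_size = (10, 10)                               # hypothetical gs
slave_origins = [((0, 0), 'tcp://host-a:5555'),    # hypothetical (origin, address) pairs
                 ((10, 0), 'tcp://host-b:5555')]

def lookup(xy):
    x, y = xy
    for (ox, oy), addr in slave_origins:
        if ox <= x < ox + grid_size[0] and oy <= y < oy + grid_size[1]:
            return addr
    return None

print(lookup((3, 4)))    # tcp://host-a:5555
print(lookup((12, 4)))   # tcp://host-b:5555
print(lookup((25, 4)))   # None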
async def start(self): """Connect to device and listen to incoming messages.""" if self.connection.connected: return await self.connection.connect() # In case credentials have been given externally (i.e. not by pairing # with a device), then use that client id if self.service.device_credentials: self.srp.pairing_id = Credentials.parse( self.service.device_credentials).client_id # The first message must always be DEVICE_INFORMATION, otherwise the # device will not respond with anything msg = messages.device_information( 'pyatv', self.srp.pairing_id.decode()) await self.send_and_receive(msg) self._initial_message_sent = True # This should be the first message sent after encryption has # been enabled await self.send(messages.set_ready_state()) async def _wait_for_updates(_, semaphore): # Use a counter here whenever more than one message is expected semaphore.release() # Wait for some stuff to arrive before returning semaphore = asyncio.Semaphore(value=0, loop=self.loop) self.add_listener(_wait_for_updates, protobuf.SET_STATE_MESSAGE, data=semaphore, one_shot=True) # Subscribe to updates at this stage await self.send(messages.client_updates_config()) await self.send(messages.wake_device()) try: await asyncio.wait_for( semaphore.acquire(), 1, loop=self.loop) except asyncio.TimeoutError: # This is not an issue itself, but I should do something better. # Basically this gives the device about one second to respond with # some metadata before continuing. pass
Connect to device and listen to incoming messages.
def cleanParagraph(self): """ Compress text runs, remove whitespace at start and end, skip empty blocks, etc """ runs = self.block.content if not runs: self.block = None return if not self.clean_paragraphs: return joinedRuns = [] hasContent = False for run in runs: if run.content[0]: hasContent = True else: continue # For whitespace-only groups, remove any property stuff, # to avoid extra markup in output if not run.content[0].strip(): run.properties = {} # Join runs only if their properties match if joinedRuns and (run.properties == joinedRuns[-1].properties): joinedRuns[-1].content[0] += run.content[0] else: joinedRuns.append(run) if hasContent: # Strip beginning of paragraph joinedRuns[0].content[0] = joinedRuns[0].content[0].lstrip() # And then strip the end joinedRuns[-1].content[0] = joinedRuns[-1].content[0].rstrip() self.block.content = joinedRuns else: self.block = None
Compress text runs, remove whitespace at start and end, skip empty blocks, etc
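A stripped-down sketch of the run-joining rule described above, using plain (properties, text) tuples in place of the real run objects: whitespace-only runs drop their properties, and adjacent runs merge only when their properties match.

runs = [({'b': True}, 'Hello '), ({'b': True}, 'world'), ({}, '   '), ({'i': True}, '!')]

joined = []
for props, text in runs:
    if not text:
        continue
    if not text.strip():          # whitespace-only run: discard its properties
        props = {}
    if joined and joined[-1][0] == props:
        joined[-1] = (props, joined[-1][1] + text)
    else:
        joined.append((props, text))

# strip the paragraph ends
joined[0] = (joined[0][0], joined[0][1].lstrip())
joined[-1] = (joined[-1][0], joined[-1][1].rstrip())
print(joined)  # [({'b': True}, 'Hello world'), ({}, '   '), ({'i': True}, '!')]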
def transform(self, X, y=None, copy=None): """ Perform standardization by centering and scaling using the parameters. :param X: Data matrix to scale. :type X: numpy.ndarray, shape [n_samples, n_features] :param y: Passthrough for scikit-learn ``Pipeline`` compatibility. :type y: None :param bool copy: Copy the X matrix. :return: Scaled version of the X data matrix. :rtype: numpy.ndarray, shape [n_samples, n_features] """ check_is_fitted(self, 'scale_') copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") if self.scale_ is not None: inplace_column_scale(X, 1 / self.scale_) else: if self.with_mean: X -= self.mean_ if self.with_std: X /= self.scale_ return X
Perform standardization by centering and scaling using the parameters. :param X: Data matrix to scale. :type X: numpy.ndarray, shape [n_samples, n_features] :param y: Passthrough for scikit-learn ``Pipeline`` compatibility. :type y: None :param bool copy: Copy the X matrix. :return: Scaled version of the X data matrix. :rtype: numpy.ndarray, shape [n_samples, n_features]
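For the dense, non-sparse case the transform above boils down to subtracting a stored mean and dividing by a stored scale. A minimal numpy equivalent with toy data (not the scikit-learn-style class itself):

import numpy as np

X_train = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
mean_ = X_train.mean(axis=0)          # what fitting would store
scale_ = X_train.std(axis=0)

X_new = np.array([[2.0, 25.0]])
X_scaled = (X_new - mean_) / scale_   # the dense branch of transform()
print(X_scaled)                       # [[0.         0.61237244]]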
def getresponse(self): ''' Gets the response and generates the _Response object''' status = self._httprequest.status() status_text = self._httprequest.status_text() resp_headers = self._httprequest.get_all_response_headers() fixed_headers = [] for resp_header in resp_headers.split('\n'): if (resp_header.startswith('\t') or\ resp_header.startswith(' ')) and fixed_headers: # append to previous header fixed_headers[-1] += resp_header else: fixed_headers.append(resp_header) headers = [] for resp_header in fixed_headers: if ':' in resp_header: pos = resp_header.find(':') headers.append( (resp_header[:pos].lower(), resp_header[pos + 1:].strip())) body = self._httprequest.response_body() length = len(body) return _Response(status, status_text, length, headers, body)
Gets the response and generates the _Response object
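The only subtle step above is re-joining folded header lines (continuations that start with a tab or space) before splitting each header on its first colon. A stand-alone sketch on an invented raw header block:

raw = ("Content-Type: text/plain\n"
       "X-Long-Header: part one\n"
       "\tpart two\n"
       "Content-Length: 12\n")

fixed = []
for line in raw.split('\n'):
    if line.startswith(('\t', ' ')) and fixed:
        fixed[-1] += line            # continuation of the previous header
    else:
        fixed.append(line)

headers = []
for line in fixed:
    if ':' in line:
        pos = line.find(':')
        headers.append((line[:pos].lower(), line[pos + 1:].strip()))
print(headers)
# [('content-type', 'text/plain'), ('x-long-header', 'part one\tpart two'), ('content-length', '12')]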
def _bisect(value_and_gradients_function, initial_args, f_lim): """Actual implementation of bisect given initial_args in a _BracketResult.""" def _loop_cond(curr): # TODO(b/112524024): Also take into account max_iterations. return ~tf.reduce_all(input_tensor=curr.stopped) def _loop_body(curr): """Narrow down interval to satisfy opposite slope conditions.""" mid = value_and_gradients_function((curr.left.x + curr.right.x) / 2) # Fail if function values at mid point are no longer finite; or left/right # points are so close to it that we can't distinguish them any more. failed = (curr.failed | ~is_finite(mid) | tf.equal(mid.x, curr.left.x) | tf.equal(mid.x, curr.right.x)) # If mid point has a negative slope and the function value at that point is # small enough, we can use it as a new left end point to narrow down the # interval. If mid point has a positive slope, then we have found a suitable # right end point to bracket a minima within opposite slopes. Otherwise, the # mid point has a negative slope but the function value at that point is too # high to work as left end point, we are in the same situation in which we # started the loop so we just update the right end point and continue. to_update = ~(curr.stopped | failed) update_left = (mid.df < 0) & (mid.f <= f_lim) left = val_where(to_update & update_left, mid, curr.left) right = val_where(to_update & ~update_left, mid, curr.right) # We're done when the right end point has a positive slope. stopped = curr.stopped | failed | (right.df >= 0) return [_IntermediateResult( iteration=curr.iteration, stopped=stopped, failed=failed, num_evals=curr.num_evals + 1, left=left, right=right)] # The interval needs updating if the right end point has a negative slope and # the value of the function at that point is too high. It is not a valid left # end point but along with the current left end point, it encloses another # minima. The loop above tries to narrow the interval so that it satisfies the # opposite slope conditions. return tf.while_loop( cond=_loop_cond, body=_loop_body, loop_vars=[initial_args])[0]
Actual implementation of bisect given initial_args in a _BracketResult.
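Stripped of the TensorFlow batching, the narrowing rule above is easy to state for a single scalar function: move the midpoint to whichever side keeps a left point with negative slope and acceptable value, and stop once the right point has non-negative slope. A plain-Python sketch on a toy quadratic (not the batched implementation):

def bisect_opposite_slope(f, df, left, right, f_lim, tol=1e-6):
    """Shrink [left, right] until df(left) < 0 <= df(right)."""
    while right - left > tol:
        mid = 0.5 * (left + right)
        if df(mid) >= 0:
            return left, mid          # opposite slopes found
        if f(mid) <= f_lim:
            left = mid                # mid is a valid new left end point
        else:
            right = mid               # slope negative but value too high
    return left, right

f = lambda x: (x - 2.0) ** 2
df = lambda x: 2.0 * (x - 2.0)
print(bisect_opposite_slope(f, df, 0.0, 5.0, f_lim=10.0))  # (0.0, 2.5)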
def ahead(self, i, j=None): '''Raising StopIteration will end the parse. ''' if j is None: return self._stream[self.i + i] else: return self._stream[self.i + i: self.i + j]
Raising StopIteration will end the parse.
def getItemTrace(self): """Returns a node trace up to the <schema> item. """ item, path, name, ref = self, [], 'name', 'ref' while not isinstance(item,XMLSchema) and not isinstance(item,WSDLToolsAdapter): attr = item.getAttribute(name) if not attr: attr = item.getAttribute(ref) if not attr: path.append('<%s>' %(item.tag)) else: path.append('<%s ref="%s">' %(item.tag, attr)) else: path.append('<%s name="%s">' %(item.tag,attr)) item = item._parent() try: tns = item.getTargetNamespace() except: tns = '' path.append('<%s targetNamespace="%s">' %(item.tag, tns)) path.reverse() return ''.join(path)
Returns a node trace up to the <schema> item.
def format(self, data, *args, **kwargs): ''' Format the incoming list of Post data. The ``data`` passed in here has the same structure as the return value of :meth:`.ZhihuDaily.crawl`, but its content may differ; this keeps the method flexible, so that articles other than today's can also be formatted, e.g. to build themed collection books. :param data: list of articles to be processed :type data: list :return: customized data structure suitable for mobi packaging :rtype: dict ''' sections = OrderedDict() hot_list = [] normal_list = [] for item in data: meta = item.get('meta', []) # Skip to the next entry if the title is empty if not item.get('title'): continue soup = BeautifulSoup(item.get('content'), "lxml") # Clean the article content and remove useless parts for view_more in soup.select('.view-more'): view_more.extract() item['content'] = str(soup.div) # Handle the article excerpt; if empty, generate and fill it from the body text if not item.get('excerpt') and item.get('content'): word_limit = self.options.get( 'toc_desc_word_limit', 500) content_list = soup.select('div.content') content_list = [content.get_text() for content in content_list] excerpt = ' '.join(content_list)[:word_limit] # The excerpt must be HTML-escaped here, otherwise tag handling in toc.ncx breaks item['excerpt'] = html.escape(excerpt) # Extract the section grouping from the item top = meta.pop('spider.zhihu_daily.top', '0') item['meta'] = meta if str(top) == '1': hot_list.append(item) else: normal_list.append(item) if hot_list: sections.setdefault('热闻', hot_list) if normal_list: sections.setdefault('日报', normal_list) return sections
Format the incoming list of Post data. The ``data`` passed in here has the same structure as the return value of :meth:`.ZhihuDaily.crawl`, but its content may differ; this keeps the method flexible, so that articles other than today's can also be formatted, e.g. to build themed collection books. :param data: list of articles to be processed :type data: list :return: customized data structure suitable for mobi packaging :rtype: dict
def _extend_nocheck(self, iterable): """extends without checking for uniqueness This function should only be used internally by DictList when it can guarantee elements are already unique (as in when coming from self or other DictList). It will be faster because it skips these checks. """ current_length = len(self) list.extend(self, iterable) _dict = self._dict if current_length == 0: self._generate_index() return for i, obj in enumerate(islice(self, current_length, None), current_length): _dict[obj.id] = i
extends without checking for uniqueness This function should only be used internally by DictList when it can guarantee elements are already unique (as in when coming from self or other DictList). It will be faster because it skips these checks.
def get_all_in_collection(self, collection_paths: Union[str, Iterable[str]], load_metadata: bool = True) \ -> Sequence[EntityType]: """ Gets entities contained within the given iRODS collections. If one or more of the collection_paths does not exist, a `FileNotFound` exception will be raised. :param collection_paths: the collection(s) to get the entities from :param load_metadata: whether metadata associated to the entities should be loaded :return: the entities loaded from iRODS """
Gets entities contained within the given iRODS collections. If one or more of the collection_paths does not exist, a `FileNotFound` exception will be raised. :param collection_paths: the collection(s) to get the entities from :param load_metadata: whether metadata associated to the entities should be loaded :return: the entities loaded from iRODS
def make_named_stemmer(stem=None, min_len=3): """Construct a callable object and a string sufficient to reconstruct it later (unpickling) >>> make_named_stemmer('str_lower') ('str_lower', <function str_lower at ...>) >>> make_named_stemmer('Lancaster') ('lancaster', <Stemmer object at ...>) """ name, stem = stringify(stem), make_stemmer(stem=stem, min_len=min_len) if hasattr(stem, '__name__'): return stem.__name__, stem if name.strip().lower() in STEMMER_TYPES: return name.strip().lower(), stem if hasattr(stem, 'pattern'): return stem.pattern, stem return stringify(stem), stem
Construct a callable object and a string sufficient to reconstruct it later (unpickling) >>> make_named_stemmer('str_lower') ('str_lower', <function str_lower at ...>) >>> make_named_stemmer('Lancaster') ('lancaster', <Stemmer object at ...>)
def get_teachers_sorted(self): """Get teachers sorted by last name. This is used for the announcement request page. """ teachers = self.get_teachers() teachers = [(u.last_name, u.first_name, u.id) for u in teachers] for t in teachers: if t is None or t[0] is None or t[1] is None or t[2] is None: teachers.remove(t) for t in teachers: if t[0] is None or len(t[0]) <= 1: teachers.remove(t) teachers.sort(key=lambda u: (u[0], u[1])) # Hack to return QuerySet in given order id_list = [t[2] for t in teachers] clauses = ' '.join(['WHEN id=%s THEN %s' % (pk, i) for i, pk in enumerate(id_list)]) ordering = 'CASE %s END' % clauses queryset = User.objects.filter(id__in=id_list).extra(select={'ordering': ordering}, order_by=('ordering',)) return queryset
Get teachers sorted by last name. This is used for the announcement request page.
def mgmt_root(opt_bigip, opt_username, opt_password, opt_port, opt_token): '''bigip fixture''' try: from pytest import symbols except ImportError: m = ManagementRoot(opt_bigip, opt_username, opt_password, port=opt_port, token=opt_token) else: if symbols is not None: m = ManagementRoot(symbols.bigip_mgmt_ip_public, symbols.bigip_username, symbols.bigip_password, port=opt_port, token=opt_token) else: m = ManagementRoot(opt_bigip, opt_username, opt_password, port=opt_port, token=opt_token) return m
bigip fixture
def merge_dict(d0, d1, add_new_keys=False, append_arrays=False): """Recursively merge the contents of python dictionary d0 with the contents of another python dictionary, d1. Parameters ---------- d0 : dict The input dictionary. d1 : dict Dictionary to be merged with the input dictionary. add_new_keys : bool Do not skip keys that only exist in d1. append_arrays : bool If an element is a numpy array set the value of that element by concatenating the two arrays. """ if d1 is None: return d0 elif d0 is None: return d1 elif d0 is None and d1 is None: return {} od = {} for k, v in d0.items(): t0 = None t1 = None if k in d0: t0 = type(d0[k]) if k in d1: t1 = type(d1[k]) if k not in d1: od[k] = copy.deepcopy(d0[k]) elif isinstance(v, dict) and isinstance(d1[k], dict): od[k] = merge_dict(d0[k], d1[k], add_new_keys, append_arrays) elif isinstance(v, list) and isstr(d1[k]): od[k] = d1[k].split(',') elif isinstance(v, dict) and d1[k] is None: od[k] = copy.deepcopy(d0[k]) elif isinstance(v, np.ndarray) and append_arrays: od[k] = np.concatenate((v, d1[k])) elif (d0[k] is not None and d1[k] is not None) and t0 != t1: if t0 == dict or t0 == list: raise Exception('Conflicting types in dictionary merge for ' 'key %s %s %s' % (k, t0, t1)) od[k] = t0(d1[k]) else: od[k] = copy.copy(d1[k]) if add_new_keys: for k, v in d1.items(): if k not in d0: od[k] = copy.deepcopy(d1[k]) return od
Recursively merge the contents of python dictionary d0 with the contents of another python dictionary, d1. Parameters ---------- d0 : dict The input dictionary. d1 : dict Dictionary to be merged with the input dictionary. add_new_keys : bool Do not skip keys that only exist in d1. append_arrays : bool If an element is a numpy array set the value of that element by concatenating the two arrays.
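A short usage sketch for the merge semantics documented above, assuming merge_dict and its imports are available as in the function: values from d1 win, nested dictionaries are merged recursively, and keys missing from d1 keep their d0 values. The configuration dictionaries are invented.

d0 = {'selection': {'emin': 100.0, 'emax': 1000.0}, 'binning': {'binsz': 0.1}}
d1 = {'selection': {'emax': 10000.0}}

merged = merge_dict(d0, d1)
print(merged['selection'])   # {'emin': 100.0, 'emax': 10000.0}
print(merged['binning'])     # {'binsz': 0.1}

# With add_new_keys=True, keys present only in d1 are carried over as well.
merged = merge_dict(d0, {'model': {'src': 'pointsource'}}, add_new_keys=True)
print('model' in merged)     # True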
def _example_broker_queue(quote_ctx): """ Get the broker queue and print the broker IDs, broker names and broker levels on the bid and ask sides """ stock_code_list = ["HK.00700"] for stk_code in stock_code_list: ret_status, ret_data = quote_ctx.subscribe(stk_code, ft.SubType.BROKER) if ret_status != ft.RET_OK: print(ret_data) exit() for stk_code in stock_code_list: ret_status, bid_data, ask_data = quote_ctx.get_broker_queue(stk_code) if ret_status != ft.RET_OK: print(bid_data) exit() print("%s BROKER" % stk_code) print(ask_data) print("\n\n") print(bid_data) print("\n\n")
Get the broker queue and print the broker IDs, broker names and broker levels on the bid and ask sides
def download(self, files=None, formats=None, glob_pattern=None, dry_run=None, verbose=None, silent=None, ignore_existing=None, checksum=None, destdir=None, no_directory=None, retries=None, item_index=None, ignore_errors=None, on_the_fly=None, return_responses=None, no_change_timestamp=None, params=None): """Download files from an item. :param files: (optional) Only download files matching the given file names. :type formats: str :param formats: (optional) Only download files matching the given formats. :type glob_pattern: str :param glob_pattern: (optional) Only download files matching the given glob pattern. :type dry_run: bool :param dry_run: (optional) Output download URLs to stdout, don't download anything. :type verbose: bool :param verbose: (optional) Turn on verbose output. :type silent: bool :param silent: (optional) Suppress all output. :type ignore_existing: bool :param ignore_existing: (optional) Skip files that already exist locally. :type checksum: bool :param checksum: (optional) Skip downloading file based on checksum. :type destdir: str :param destdir: (optional) The directory to download files to. :type no_directory: bool :param no_directory: (optional) Download files to current working directory rather than creating an item directory. :type retries: int :param retries: (optional) The number of times to retry on failed requests. :type item_index: int :param item_index: (optional) The index of the item for displaying progress in bulk downloads. :type ignore_errors: bool :param ignore_errors: (optional) Don't fail if a single file fails to download, continue to download other files. :type on_the_fly: bool :param on_the_fly: (optional) Download on-the-fly files (i.e. derivative EPUB, MOBI, DAISY files). :type return_responses: bool :param return_responses: (optional) Rather than downloading files to disk, return a list of response objects. :type no_change_timestamp: bool :param no_change_timestamp: (optional) If True, leave the time stamp as the current time instead of changing it to that given in the original archive. :type params: dict :param params: (optional) URL parameters to send with download request (e.g. `cnt=0`). :rtype: bool :returns: True if all files have been downloaded successfully. 
""" dry_run = False if dry_run is None else dry_run verbose = False if verbose is None else verbose silent = False if silent is None else silent ignore_existing = False if ignore_existing is None else ignore_existing ignore_errors = False if not ignore_errors else ignore_errors checksum = False if checksum is None else checksum no_directory = False if no_directory is None else no_directory return_responses = False if not return_responses else True no_change_timestamp = False if not no_change_timestamp else no_change_timestamp params = None if not params else params if not dry_run: if item_index and verbose is True: print('{0} ({1}):'.format(self.identifier, item_index)) elif item_index and silent is False: print('{0} ({1}): '.format(self.identifier, item_index), end='') elif item_index is None and verbose is True: print('{0}:'.format(self.identifier)) elif item_index is None and silent is False: print(self.identifier, end=': ') sys.stdout.flush() if self.is_dark is True: msg = 'skipping {0}, item is dark'.format(self.identifier) log.warning(msg) if verbose: print(' ' + msg) elif silent is False: print(msg) return elif self.metadata == {}: msg = 'skipping {0}, item does not exist.'.format(self.identifier) log.warning(msg) if verbose: print(' ' + msg) elif silent is False: print(msg) return if files: files = self.get_files(files, on_the_fly=on_the_fly) else: files = self.get_files(on_the_fly=on_the_fly) if formats: files = self.get_files(formats=formats, on_the_fly=on_the_fly) if glob_pattern: files = self.get_files(glob_pattern=glob_pattern, on_the_fly=on_the_fly) if not files: msg = 'skipping {0}, no matching files found.'.format(self.identifier) log.info(msg) if verbose: print(' ' + msg) elif silent is False: print(msg, end='') errors = list() responses = list() for f in files: if no_directory: path = f.name else: path = os.path.join(self.identifier, f.name) if dry_run: print(f.url) continue r = f.download(path, verbose, silent, ignore_existing, checksum, destdir, retries, ignore_errors, None, return_responses, no_change_timestamp, params) if return_responses: responses.append(r) if r is False: errors.append(f.name) if silent is False and verbose is False and dry_run is False: if errors: print(' - errors') else: print(' - success') if return_responses: return responses else: return errors
Download files from an item. :param files: (optional) Only download files matching the given file names. :type formats: str :param formats: (optional) Only download files matching the given formats. :type glob_pattern: str :param glob_pattern: (optional) Only download files matching the given glob pattern. :type dry_run: bool :param dry_run: (optional) Output download URLs to stdout, don't download anything. :type verbose: bool :param verbose: (optional) Turn on verbose output. :type silent: bool :param silent: (optional) Suppress all output. :type ignore_existing: bool :param ignore_existing: (optional) Skip files that already exist locally. :type checksum: bool :param checksum: (optional) Skip downloading file based on checksum. :type destdir: str :param destdir: (optional) The directory to download files to. :type no_directory: bool :param no_directory: (optional) Download files to current working directory rather than creating an item directory. :type retries: int :param retries: (optional) The number of times to retry on failed requests. :type item_index: int :param item_index: (optional) The index of the item for displaying progress in bulk downloads. :type ignore_errors: bool :param ignore_errors: (optional) Don't fail if a single file fails to download, continue to download other files. :type on_the_fly: bool :param on_the_fly: (optional) Download on-the-fly files (i.e. derivative EPUB, MOBI, DAISY files). :type return_responses: bool :param return_responses: (optional) Rather than downloading files to disk, return a list of response objects. :type no_change_timestamp: bool :param no_change_timestamp: (optional) If True, leave the time stamp as the current time instead of changing it to that given in the original archive. :type params: dict :param params: (optional) URL parameters to send with download request (e.g. `cnt=0`). :rtype: bool :returns: True if all files have been downloaded successfully.
def pgcd(numa, numb): """ Calculate the greatest common divisor (GCD) of two numbers. :param numa: First number :type numa: number :param numb: Second number :type numb: number :rtype: number For example: >>> import pmisc, fractions >>> pmisc.pgcd(10, 15) 5 >>> str(pmisc.pgcd(0.05, 0.02)) '0.01' >>> str(pmisc.pgcd(5/3.0, 2/3.0))[:6] '0.3333' >>> pmisc.pgcd( ... fractions.Fraction(str(5/3.0)), ... fractions.Fraction(str(2/3.0)) ... ) Fraction(1, 3) >>> pmisc.pgcd( ... fractions.Fraction(5, 3), ... fractions.Fraction(2, 3) ... ) Fraction(1, 3) """ # Test for integers this way to be valid also for Numpy data types without # actually importing (and package depending on) Numpy int_args = (int(numa) == numa) and (int(numb) == numb) fraction_args = isinstance(numa, Fraction) and isinstance(numb, Fraction) # Force conversion for Numpy data types if int_args: numa, numb = int(numa), int(numb) elif not fraction_args: numa, numb = float(numa), float(numb) # Limit floating numbers to a "sane" fractional part resolution if (not int_args) and (not fraction_args): numa, numb = ( Fraction(_no_exp(numa)).limit_denominator(), Fraction(_no_exp(numb)).limit_denominator(), ) while numb: numa, numb = ( numb, (numa % numb if int_args else (numa % numb).limit_denominator()), ) return int(numa) if int_args else (numa if fraction_args else float(numa))
Calculate the greatest common divisor (GCD) of two numbers. :param numa: First number :type numa: number :param numb: Second number :type numb: number :rtype: number For example: >>> import pmisc, fractions >>> pmisc.pgcd(10, 15) 5 >>> str(pmisc.pgcd(0.05, 0.02)) '0.01' >>> str(pmisc.pgcd(5/3.0, 2/3.0))[:6] '0.3333' >>> pmisc.pgcd( ... fractions.Fraction(str(5/3.0)), ... fractions.Fraction(str(2/3.0)) ... ) Fraction(1, 3) >>> pmisc.pgcd( ... fractions.Fraction(5, 3), ... fractions.Fraction(2, 3) ... ) Fraction(1, 3)
def validate_path(path): """Validates the provided path :param path: path to validate (string) :raise: :InvalidUsage: If validation fails. """ if not isinstance(path, six.string_types) or not re.match('^/(?:[._a-zA-Z0-9-]/?)+[^/]$', path): raise InvalidUsage( "Path validation failed - Expected: '/<component>[/component], got: %s" % path ) return True
Validates the provided path :param path: path to validate (string) :raise: :InvalidUsage: If validation fails.
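A few hedged examples of what the regular expression above accepts and rejects (the paths are made up): a valid path starts with '/', its components use letters, digits, dots, underscores or hyphens, and it must not end with '/'.

import re

pattern = re.compile('^/(?:[._a-zA-Z0-9-]/?)+[^/]$')

for path in ['/table/incident', '/table/incident/', 'table/incident', '/attachment']:
    print(path, '->', bool(pattern.match(path)))
# /table/incident -> True
# /table/incident/ -> False
# table/incident -> False
# /attachment -> True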
def _raise_decomposition_errors(uvw, antenna1, antenna2, chunks, ant_uvw, max_err): """ Raises informative exception for an invalid decomposition """ start = 0 problem_str = [] for ci, chunk in enumerate(chunks): end = start + chunk ant1 = antenna1[start:end] ant2 = antenna2[start:end] cuvw = uvw[start:end] ant1_uvw = ant_uvw[ci, ant1, :] ant2_uvw = ant_uvw[ci, ant2, :] ruvw = ant2_uvw - ant1_uvw # Identify rows where any of the UVW components differed close = np.isclose(ruvw, cuvw) problems = np.nonzero(np.logical_or.reduce(np.invert(close), axis=1)) for row in problems[0]: problem_str.append("[row %d [%d, %d] (chunk %d)]: " "original %s recovered %s " "ant1 %s ant2 %s" % ( start+row, ant1[row], ant2[row], ci, cuvw[row], ruvw[row], ant1_uvw[row], ant2_uvw[row])) # Exit inner loop early if len(problem_str) >= max_err: break # Exit outer loop early if len(problem_str) >= max_err: break start = end # Return early if nothing was wrong if len(problem_str) == 0: return # Add a preamble and raise exception problem_str = ["Antenna UVW Decomposition Failed", "The following differences were found " "(first 100):"] + problem_str raise AntennaUVWDecompositionError('\n'.join(problem_str))
Raises informative exception for an invalid decomposition
def log_template_errors(logger, log_level=logging.ERROR): """ Decorator to log template errors to the specified logger. @log_template_errors(logging.getLogger('mylogger'), logging.INFO) def my_view(*args): pass Will log template errors at INFO. The default log level is ERROR. """ if not (isinstance(log_level, int) and log_level in logging._levelNames): raise ValueError('Invalid log level %s' % log_level) decorators = [ _log_template_string_if_invalid(logger, log_level), _log_unicode_errors(logger, log_level), _always_strict_resolve, ] if django.VERSION < (1, 8): decorators.append(_patch_invalid_var_format_string) @decorator def function(f, *args, **kwargs): return reduce(__apply, decorators, f)(*args, **kwargs) return function
Decorator to log template errors to the specified logger. @log_template_errors(logging.getLogger('mylogger'), logging.INFO) def my_view(*args): pass Will log template errors at INFO. The default log level is ERROR.
def print_logs(query, types=None): """ Print status logs. """ if query is None: return for run, log in query: print(("{0} @ {1} - {2} id: {3} group: {4} status: {5}".format( run.end, run.experiment_name, run.project_name, run.experiment_group, run.run_group, log.status))) print(("command: {0}".format(run.command))) if "stderr" in types: print("StdErr:") print((log.stderr)) if "stdout" in types: print("StdOut:") print((log.stdout)) print()
Print status logs.
def ncbi_blast(self, db="nr", megablast=True, sequence=None): """ perform an NCBI blast against the sequence of this feature """ import requests requests.defaults.max_retries = 4 assert sequence in (None, "cds", "mrna") seq = self.sequence() if sequence is None else ("".join(self.cds_sequence if sequence == "cds" else self.mrna_sequence)) r = requests.post('http://blast.ncbi.nlm.nih.gov/Blast.cgi', timeout=20, data=dict( PROGRAM="blastn", #EXPECT=2, DESCRIPTIONS=100, ALIGNMENTS=0, FILTER="L", # low complexity CMD="Put", MEGABLAST=True, DATABASE=db, QUERY=">%s\n%s" % (self.name, seq) ) ) if not ("RID =" in r.text and "RTOE" in r.text): print("no results", file=sys.stderr) raise StopIteration rid = r.text.split("RID = ")[1].split("\n")[0] import time time.sleep(4) print("checking...", file=sys.stderr) r = requests.post('http://blast.ncbi.nlm.nih.gov/Blast.cgi', data=dict(RID=rid, format="Text", DESCRIPTIONS=100, DATABASE=db, CMD="Get", )) while "Status=WAITING" in r.text: print("checking...", file=sys.stderr) time.sleep(10) r = requests.post('http://blast.ncbi.nlm.nih.gov/Blast.cgi', data=dict(RID=rid, format="Text", CMD="Get", )) for rec in _ncbi_parse(r.text): yield rec
perform an NCBI blast against the sequence of this feature
def process_objects(kls): """ Applies default Meta properties. """ # first add a Meta object if not exists if 'Meta' not in kls.__dict__: kls.Meta = type('Meta', (object,), {}) if 'unique_together' not in kls.Meta.__dict__: kls.Meta.unique_together = [] # set verbose_name(s) if not already set if 'verbose_name' not in kls.Meta.__dict__: kls.Meta.verbose_name = kls.__name__ if 'verbose_name_plural' not in kls.Meta.__dict__: kls.Meta.verbose_name_plural = kls.Meta.verbose_name + 's'
Applies default Meta properties.
def map_query(self, variables=None, evidence=None): """ MAP Query method using belief propagation. Note: When multiple variables are passed, it returns the map_query for each of them individually. Parameters ---------- variables: list list of variables for which you want to compute the probability evidence: dict a dict key, value pair as {var: state_of_var_observed} None if no evidence Examples -------- >>> from pgmpy.factors.discrete import TabularCPD >>> from pgmpy.models import BayesianModel >>> from pgmpy.inference import BeliefPropagation >>> bayesian_model = BayesianModel([('A', 'J'), ('R', 'J'), ('J', 'Q'), ... ('J', 'L'), ('G', 'L')]) >>> cpd_a = TabularCPD('A', 2, [[0.2], [0.8]]) >>> cpd_r = TabularCPD('R', 2, [[0.4], [0.6]]) >>> cpd_j = TabularCPD('J', 2, ... [[0.9, 0.6, 0.7, 0.1], ... [0.1, 0.4, 0.3, 0.9]], ... ['R', 'A'], [2, 2]) >>> cpd_q = TabularCPD('Q', 2, ... [[0.9, 0.2], ... [0.1, 0.8]], ... ['J'], [2]) >>> cpd_l = TabularCPD('L', 2, ... [[0.9, 0.45, 0.8, 0.1], ... [0.1, 0.55, 0.2, 0.9]], ... ['G', 'J'], [2, 2]) >>> cpd_g = TabularCPD('G', 2, [[0.6], [0.4]]) >>> bayesian_model.add_cpds(cpd_a, cpd_r, cpd_j, cpd_q, cpd_l, cpd_g) >>> belief_propagation = BeliefPropagation(bayesian_model) >>> belief_propagation.map_query(variables=['J', 'Q'], ... evidence={'A': 0, 'R': 0, 'G': 0, 'L': 1}) """ # TODO:Check the note in docstring. Change that behavior to return the joint MAP if not variables: variables = set(self.variables) final_distribution = self._query(variables=variables, operation='marginalize', evidence=evidence) # To handle the case when no argument is passed then # _variable_elimination returns a dict. argmax = np.argmax(final_distribution.values) assignment = final_distribution.assignment([argmax])[0] map_query_results = {} for var_assignment in assignment: var, value = var_assignment map_query_results[var] = value if not variables: return map_query_results else: return_dict = {} for var in variables: return_dict[var] = map_query_results[var] return return_dict
MAP Query method using belief propagation. Note: When multiple variables are passed, it returns the map_query for each of them individually. Parameters ---------- variables: list list of variables for which you want to compute the probability evidence: dict a dict key, value pair as {var: state_of_var_observed} None if no evidence Examples -------- >>> from pgmpy.factors.discrete import TabularCPD >>> from pgmpy.models import BayesianModel >>> from pgmpy.inference import BeliefPropagation >>> bayesian_model = BayesianModel([('A', 'J'), ('R', 'J'), ('J', 'Q'), ... ('J', 'L'), ('G', 'L')]) >>> cpd_a = TabularCPD('A', 2, [[0.2], [0.8]]) >>> cpd_r = TabularCPD('R', 2, [[0.4], [0.6]]) >>> cpd_j = TabularCPD('J', 2, ... [[0.9, 0.6, 0.7, 0.1], ... [0.1, 0.4, 0.3, 0.9]], ... ['R', 'A'], [2, 2]) >>> cpd_q = TabularCPD('Q', 2, ... [[0.9, 0.2], ... [0.1, 0.8]], ... ['J'], [2]) >>> cpd_l = TabularCPD('L', 2, ... [[0.9, 0.45, 0.8, 0.1], ... [0.1, 0.55, 0.2, 0.9]], ... ['G', 'J'], [2, 2]) >>> cpd_g = TabularCPD('G', 2, [[0.6], [0.4]]) >>> bayesian_model.add_cpds(cpd_a, cpd_r, cpd_j, cpd_q, cpd_l, cpd_g) >>> belief_propagation = BeliefPropagation(bayesian_model) >>> belief_propagation.map_query(variables=['J', 'Q'], ... evidence={'A': 0, 'R': 0, 'G': 0, 'L': 1})
def clone(self, **kwargs): ''' Clone this context, and return the ChildContextDict ''' child = ChildContextDict(parent=self, threadsafe=self._threadsafe, overrides=kwargs) return child
Clone this context, and return the ChildContextDict
def _expand_shorthand(model_formula, variables): """Expand shorthand terms in the model formula. """ wm = 'white_matter' gsr = 'global_signal' rps = 'trans_x + trans_y + trans_z + rot_x + rot_y + rot_z' fd = 'framewise_displacement' acc = _get_matches_from_data('a_comp_cor_[0-9]+', variables) tcc = _get_matches_from_data('t_comp_cor_[0-9]+', variables) dv = _get_matches_from_data('^std_dvars$', variables) dvall = _get_matches_from_data('.*dvars', variables) nss = _get_matches_from_data('non_steady_state_outlier[0-9]+', variables) spikes = _get_matches_from_data('motion_outlier[0-9]+', variables) model_formula = re.sub('wm', wm, model_formula) model_formula = re.sub('gsr', gsr, model_formula) model_formula = re.sub('rps', rps, model_formula) model_formula = re.sub('fd', fd, model_formula) model_formula = re.sub('acc', acc, model_formula) model_formula = re.sub('tcc', tcc, model_formula) model_formula = re.sub('dv', dv, model_formula) model_formula = re.sub('dvall', dvall, model_formula) model_formula = re.sub('nss', nss, model_formula) model_formula = re.sub('spikes', spikes, model_formula) formula_variables = _get_variables_from_formula(model_formula) others = ' + '.join(set(variables) - set(formula_variables)) model_formula = re.sub('others', others, model_formula) return model_formula
Expand shorthand terms in the model formula.
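The expansion itself is a chain of plain textual substitutions. A cut-down sketch with only three of the shorthands and an invented variable list and formula:

import re

variables = ['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z',
             'white_matter', 'global_signal', 'a_comp_cor_00', 'a_comp_cor_01']

rps = 'trans_x + trans_y + trans_z + rot_x + rot_y + rot_z'
acc = ' + '.join(v for v in variables if re.match('a_comp_cor_[0-9]+', v))

formula = 'wm + rps + acc'
formula = re.sub('wm', 'white_matter', formula)
formula = re.sub('rps', rps, formula)
formula = re.sub('acc', acc, formula)
print(formula)
# white_matter + trans_x + trans_y + trans_z + rot_x + rot_y + rot_z + a_comp_cor_00 + a_comp_cor_01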
def action_set(values): """Sets the values to be returned after the action finishes""" cmd = ['action-set'] for k, v in list(values.items()): cmd.append('{}={}'.format(k, v)) subprocess.check_call(cmd)
Sets the values to be returned after the action finishes
def ellipse_from_second_moments(image, labels, indexes, wants_compactness = False): """Calculate measurements of ellipses equivalent to the second moments of labels image - the intensity at each point labels - for each labeled object, derive an ellipse indexes - sequence of indexes to process returns the following arrays: coordinates of the center of the ellipse eccentricity major axis length minor axis length orientation compactness (if asked for) some definitions taken from "Image Moments-Based Structuring and Tracking of Objects", LOURENA ROCHA, LUIZ VELHO, PAULO CEZAR P. CARVALHO, http://sibgrapi.sid.inpe.br/col/sid.inpe.br/banon/2002/10.23.11.34/doc/35.pdf particularly equation 5 (which has some errors in it). These yield the rectangle with equivalent second moments. I translate to the ellipse by multiplying by 1.154701 which is Matlab's calculation of the major and minor axis length for a square of length X divided by the actual length of the side of a square of that length. eccentricity is the distance between foci divided by the major axis length orientation is the angle of the major axis with respect to the X axis compactness is the variance of the radial distribution normalized by the area """ if len(indexes) == 0: return (np.zeros((0,2)), np.zeros((0,)), np.zeros((0,)), np.zeros((0,)),np.zeros((0,))) i,j = np.argwhere(labels != 0).transpose() return ellipse_from_second_moments_ijv(i,j,image[i,j], labels[i,j], indexes, wants_compactness)
Calculate measurements of ellipses equivalent to the second moments of labels image - the intensity at each point labels - for each labeled object, derive an ellipse indexes - sequence of indexes to process returns the following arrays: coordinates of the center of the ellipse eccentricity major axis length minor axis length orientation compactness (if asked for) some definitions taken from "Image Moments-Based Structuring and Tracking of Objects", LOURENA ROCHA, LUIZ VELHO, PAULO CEZAR P. CARVALHO, http://sibgrapi.sid.inpe.br/col/sid.inpe.br/banon/2002/10.23.11.34/doc/35.pdf particularly equation 5 (which has some errors in it). These yield the rectangle with equivalent second moments. I translate to the ellipse by multiplying by 1.154701 which is Matlab's calculation of the major and minor axis length for a square of length X divided by the actual length of the side of a square of that length. eccentricity is the distance between foci divided by the major axis length orientation is the angle of the major axis with respect to the X axis compactness is the variance of the radial distribution normalized by the area
def Back(self, n = 1, dl = 0): """Press the backspace key n times """ self.Delay(dl) self.keyboard.tap_key(self.keyboard.backspace_key, n)
Press the backspace key n times
def _make_valid_state_name(self, state_name): """Transform the input state_name into a valid state in XMLBIF. XMLBIF states must start with a letter an only contain letters, numbers and underscores. """ s = str(state_name) s_fixed = pp.CharsNotIn(pp.alphanums + "_").setParseAction(pp.replaceWith("_")).transformString(s) if not s_fixed[0].isalpha(): s_fixed = "state" + s_fixed return s_fixed
Transform the input state_name into a valid state in XMLBIF. XMLBIF states must start with a letter an only contain letters, numbers and underscores.
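For readers who only want the rule, here is an equivalent re-based sketch (a stand-in, not the pyparsing implementation above): replace every character outside letters, digits and underscores, then prefix 'state' if the result does not start with a letter.

import re

def make_valid_state_name(state_name):
    # Replace anything that is not alphanumeric or underscore with an underscore.
    s = re.sub(r'[^A-Za-z0-9_]', '_', str(state_name))
    if not s[0].isalpha():
        s = 'state' + s
    return s

print(make_valid_state_name('(0, 1)'))   # state_0__1_
print(make_valid_state_name('yes'))      # yes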
def delete(self, option=None): """Delete the current document in the Firestore database. Args: option (Optional[~.firestore_v1beta1.client.WriteOption]): A write option to make assertions / preconditions on the server state of the document before applying changes. Returns: google.protobuf.timestamp_pb2.Timestamp: The time that the delete request was received by the server. If the document did not exist when the delete was sent (i.e. nothing was deleted), this method will still succeed and will still return the time that the request was received by the server. """ write_pb = _helpers.pb_for_delete(self._document_path, option) commit_response = self._client._firestore_api.commit( self._client._database_string, [write_pb], transaction=None, metadata=self._client._rpc_metadata, ) return commit_response.commit_time
Delete the current document in the Firestore database. Args: option (Optional[~.firestore_v1beta1.client.WriteOption]): A write option to make assertions / preconditions on the server state of the document before applying changes. Returns: google.protobuf.timestamp_pb2.Timestamp: The time that the delete request was received by the server. If the document did not exist when the delete was sent (i.e. nothing was deleted), this method will still succeed and will still return the time that the request was received by the server.
def gen_report(report, sdir='./', report_name='report.html'): """ Generates report of derivation and postprocess steps in teneto.derive """ # Create report directory if not os.path.exists(sdir): os.makedirs(sdir) # Add a slash to file directory if not included to avoid DirNameFleName # instead of DirName/FileName being creaated if sdir[-1] != '/': sdir += '/' report_html = '<html><body>' if 'method' in report.keys(): report_html += "<h1>Method: " + report['method'] + "</h1><p>" for i in report[report['method']]: if i == 'taper_window': fig, ax = plt.subplots(1) ax.plot(report[report['method']]['taper_window'], report[report['method']]['taper']) ax.set_xlabel('Window (time). 0 in middle of window.') ax.set_title( 'Taper from ' + report[report['method']]['distribution'] + ' distribution (PDF).') fig.savefig(sdir + 'taper.png') report_html += "<img src='./taper.png' width=500>" + "<p>" else: report_html += "- <b>" + i + "</b>: " + \ str(report[report['method']][i]) + "<br>" if 'postprocess' in report.keys(): report_html += "<p><h2>Postprocessing:</h2><p>" report_html += "<b>Pipeline: </b>" for i in report['postprocess']: report_html += " " + i + "," for i in report['postprocess']: report_html += "<p><h3>" + i + "</h3><p>" for j in report[i]: if j == 'lambda': report_html += "- <b>" + j + "</b>: " + "<br>" lambda_val = np.array(report['boxcox']['lambda']) fig, ax = plt.subplots(1) ax.hist(lambda_val[:, -1]) ax.set_xlabel('lambda') ax.set_ylabel('frequency') ax.set_title('Histogram of lambda parameter') fig.savefig(sdir + 'boxcox_lambda.png') report_html += "<img src='./boxcox_lambda.png' width=500>" + "<p>" report_html += "Data located in " + sdir + "boxcox_lambda.csv <p>" np.savetxt(sdir + "boxcox_lambda.csv", lambda_val, delimiter=",") else: report_html += "- <b>" + j + "</b>: " + \ str(report[i][j]) + "<br>" report_html += '</body></html>' with open(sdir + report_name, 'w') as file: file.write(report_html) file.close()
Generates report of derivation and postprocess steps in teneto.derive
def concretize(x, solver, sym_handler): """ For now a lot of naive concretization is done when handling heap metadata to keep things manageable. This idiom showed up a lot as a result, so to reduce code repetition this function uses a callback to handle the one or two operations that varied across invocations. :param x: the item to be concretized :param solver: the solver to evaluate the item with :param sym_handler: the handler to be used when the item may take on more than one value :returns: a concrete value for the item """ if solver.symbolic(x): try: return solver.eval_one(x) except SimSolverError: return sym_handler(x) else: return solver.eval(x)
For now a lot of naive concretization is done when handling heap metadata to keep things manageable. This idiom showed up a lot as a result, so to reduce code repetition this function uses a callback to handle the one or two operations that varied across invocations. :param x: the item to be concretized :param solver: the solver to evaluate the item with :param sym_handler: the handler to be used when the item may take on more than one value :returns: a concrete value for the item
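A toy illustration of the callback idiom described above, with an invented stand-in for the real solver interface; the point is only to show when sym_handler is invoked.

class ToySolver:
    """Hypothetical stand-in for the solver interface assumed by concretize()."""
    def __init__(self, values):
        self.values = values
    def symbolic(self, x):
        return isinstance(x, str)          # pretend strings are symbolic variables
    def eval_one(self, x):
        if len(self.values[x]) != 1:
            raise ValueError('more than one solution')
        return self.values[x][0]
    def eval(self, x):
        return x

def concretize_sketch(x, solver, sym_handler):
    if solver.symbolic(x):
        try:
            return solver.eval_one(x)      # unique solution: use it directly
        except ValueError:
            return sym_handler(x)          # otherwise defer to the callback
    return solver.eval(x)

solver = ToySolver({'size': [16, 32, 64]})
print(concretize_sketch(40, solver, max))                                    # 40 (already concrete)
print(concretize_sketch('size', solver, lambda x: max(solver.values[x])))    # 64 (callback picks a value)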
def _find_statements(self): """Find the statements in `self.code`. Produce a sequence of line numbers that start statements. Recurses into all code objects reachable from `self.code`. """ for bp in self.child_parsers(): # Get all of the lineno information from this code. for _, l in bp._bytes_lines(): yield l
Find the statements in `self.code`. Produce a sequence of line numbers that start statements. Recurses into all code objects reachable from `self.code`.
def _sorted_copy(self, comparison, reversed=False): """ Returns a sorted copy with the colors arranged according to the given comparison. """ sorted = self.copy() _list.sort(sorted, comparison) if reversed: _list.reverse(sorted) return sorted
Returns a sorted copy with the colors arranged according to the given comparison.
def enqueue_command(self, command_name, args, options): """Enqueue a new command into this pipeline.""" assert_open(self) promise = Promise() self.commands.append((command_name, args, options, promise)) return promise
Enqueue a new command into this pipeline.
def has_segment_tables(xmldoc, name = None): """ Return True if the document contains a complete set of segment tables. Returns False otherwise. If name is given and not None then the return value is True only if the document's segment tables, if present, contain a segment list by that name. """ try: names = lsctables.SegmentDefTable.get_table(xmldoc).getColumnByName("name") lsctables.SegmentTable.get_table(xmldoc) lsctables.SegmentSumTable.get_table(xmldoc) except (ValueError, KeyError): return False return name is None or name in names
Return True if the document contains a complete set of segment tables. Returns False otherwise. If name is given and not None then the return value is True only if the document's segment tables, if present, contain a segment list by that name.
def recommend(self, users=None, k=10, exclude=None, items=None,
              new_observation_data=None, new_user_data=None, new_item_data=None,
              exclude_known=True, diversity=0, random_seed=None, verbose=True):
    """
    Recommend the ``k`` highest scored items for each user.

    Parameters
    ----------
    users : SArray, SFrame, or list, optional
        Users or observation queries for which to make recommendations. For list, SArray, and single-column inputs, this is simply a set of user IDs. By default, recommendations are returned for all users present when the model was trained. However, if the recommender model was created with additional features in the ``observation_data`` SFrame, then a corresponding SFrame of observation queries -- observation data without item or target columns -- can be passed to this method. For example, a model trained with user ID, item ID, time, and rating columns may be queried using an SFrame with user ID and time columns. In this case, the user ID column must be present, and all column names should match those in the ``observation_data`` SFrame passed to ``create``.

    k : int, optional
        The number of recommendations to generate for each user.

    items : SArray, SFrame, or list, optional
        Restricts the items from which recommendations can be made. If ``items`` is an SArray, list, or SFrame with a single column, only items from the given set will be recommended. This can be used, for example, to restrict the recommendations to items within a particular category or genre. If ``items`` is an SFrame with user ID and item ID columns, then the item restriction is specialized to each user. For example, if ``items`` contains 3 rows with user U1 -- (U1, I1), (U1, I2), and (U1, I3) -- then the recommendations for user U1 are chosen from items I1, I2, and I3. By default, recommendations are made from all items present when the model was trained.

    new_observation_data : SFrame, optional
        ``new_observation_data`` gives additional observation data to the model, which may be used by the models to improve score and recommendation accuracy. Must be in the same format as the observation data passed to ``create``. How this data is used varies by model.

    new_user_data : SFrame, optional
        ``new_user_data`` may give additional user data to the model. If present, scoring is done with reference to this new information. If there is any overlap with the side information present at training time, then this new side data is preferred. Must be in the same format as the user data passed to ``create``.

    new_item_data : SFrame, optional
        ``new_item_data`` may give additional item data to the model. If present, scoring is done with reference to this new information. If there is any overlap with the side information present at training time, then this new side data is preferred. Must be in the same format as the item data passed to ``create``.

    exclude : SFrame, optional
        An :class:`~turicreate.SFrame` of user / item pairs. The column names must be equal to the user and item columns of the main data, and it provides the model with user/item pairs to exclude from the recommendations. These user-item pairs are always excluded from the predictions, even if exclude_known is False.

    exclude_known : bool, optional
        By default, all user-item interactions previously seen in the training data, or in any new data provided using ``new_observation_data``, are excluded from the recommendations. Passing in ``exclude_known = False`` overrides this behavior.

    diversity : non-negative float, optional
        If given, then the recommend function attempts to choose a set of `k` items that are both highly scored and different from other items in that set. It does this by first retrieving ``k*(1+diversity)`` recommended items, then randomly choosing a diverse set from these items. Suggested values for diversity are between 1 and 3.

    random_seed : int, optional
        If diversity is larger than 0, then some randomness is used; this controls the random seed to use for randomization. If None, it will be different each time.

    verbose : bool, optional
        If True, print the progress of generating recommendations.

    Returns
    -------
    out : SFrame
        An SFrame with the top ranked items for each user. The columns are: ``user_id``, ``item_id``, *score*, and *rank*, where ``user_id`` and ``item_id`` match the user and item column names specified at training time. The rank column is between 1 and ``k`` and gives the relative score of that item. The value of score depends on the method used for recommendations.

    See Also
    --------
    recommend_from_interactions
    predict
    evaluate
    """
    from turicreate._cython.cy_server import QuietProgress

    assert type(k) == int

    column_types = self._get_data_schema()
    user_id = self.user_id
    item_id = self.item_id
    user_type = column_types[user_id]
    item_type = column_types[item_id]

    __null_sframe = _SFrame()
    if users is None:
        users = __null_sframe
    if exclude is None:
        exclude = __null_sframe
    if items is None:
        items = __null_sframe
    if new_observation_data is None:
        new_observation_data = __null_sframe
    if new_user_data is None:
        new_user_data = __null_sframe
    if new_item_data is None:
        new_item_data = __null_sframe

    if isinstance(users, list) or (_HAS_NUMPY and isinstance(users, _numpy.ndarray)):
        users = _SArray(users)
        # Allow taking a list of dictionaries of the form
        # [{'user_id': 1, 'time': 10}], etc.
        if users.dtype == dict:
            users = users.unpack(column_name_prefix='')

    if isinstance(users, _SArray):
        users = _SFrame({user_id: users})

    if isinstance(items, list) or (_HAS_NUMPY and isinstance(items, _numpy.ndarray)):
        items = _SArray(items, dtype=item_type)

    if isinstance(items, _SArray):
        items = _SFrame({item_id: items})

    # Check type of incoming data.
    def check_type(arg, arg_name, required_type, allowed_types):
        if not isinstance(arg, required_type):
            raise TypeError("Parameter " + arg_name + " must be of type(s) "
                            + (", ".join(allowed_types))
                            + "; Type '" + str(type(arg)) + "' not recognized.")

    check_type(users, "users", _SFrame, ["SArray", "list", "SFrame", "numpy.ndarray"])
    check_type(exclude, "exclude", _SFrame, ["SFrame"])
    check_type(items, "items", _SFrame, ["SFrame", "SArray", "list", "numpy.ndarray"])
    check_type(new_observation_data, "new_observation_data", _SFrame, ["SFrame"])
    check_type(new_user_data, "new_user_data", _SFrame, ["SFrame"])
    check_type(new_item_data, "new_item_data", _SFrame, ["SFrame"])

    # See if we are in the situation where there are no users originally.
    # In this case, the default type of the user column is string, so we
    # have to be mindful of that when making recommendations and possibly
    # cast it to string if needed.
    #
    # The only case where we need to deal with the user id is when it's
    # used to link with rated items in new_observation_data, thus handle
    # that case explicitly and error out in others.

    cast_user_to_string_type = False

    if self.num_users == 0:
        cast_user_to_string_type = True

        if users.num_rows() != 0:
            # In this case, the user column has actually been set to a
            # string type, so we need to make sure that we cast everything
            # back and forth to that to preserve type.
            if new_observation_data.num_rows() == 0:
                raise ValueError("When users are not specified with the model, "
                                 "new_observation_data must be set in order to make recommendations.")

            new_observation_data[user_id] = new_observation_data[user_id].astype(user_type)
        else:
            print("WARNING: No users specified to model at creation time, so "
                  "calling recommend() for all users returns empty SFrame.")

    # Cast to the appropriate type if necessary.
    if users.num_rows() != 0:
        try:
            user_column = users[user_id]
        except RuntimeError:
            raise _ToolkitError("User column '%s' not present in input user data." % user_id)

        if cast_user_to_string_type:
            assert new_observation_data.num_rows() != 0
            original_user_type = user_column.dtype
            users[user_id] = user_column.astype(str)
            user_type = str
        elif user_column.dtype != user_type:
            users[user_id] = user_column.astype(user_type)

    # Cast user specified in exclude to the appropriate type if necessary.
    if user_id in exclude.column_names() and exclude[user_id].dtype != user_type:
        exclude[user_id] = exclude[user_id].astype(user_type)

    try:
        diversity = float(diversity)
    except Exception:
        raise TypeError("Parameter diversity must be a floating point value equal to or larger than 0.")

    if diversity < 0:
        raise TypeError("Parameter diversity must be a floating point value equal to or larger than 0.")

    if random_seed is None:
        random_seed = hash("%.20f" % _time.time())
    else:
        try:
            random_seed = int(random_seed)
        except TypeError:
            raise TypeError("random_seed must be integer.")

    opt = {'model': self.__proxy__,
           'query': users,
           'top_k': k,
           'exclude': exclude,
           'restrictions': items,
           'new_data': new_observation_data,
           'new_user_data': new_user_data,
           'new_item_data': new_item_data,
           'exclude_known': exclude_known,
           'diversity': diversity,
           'random_seed': random_seed}

    with QuietProgress(verbose):
        recs = self.__proxy__.recommend(users, exclude, items,
                                        new_observation_data, new_user_data,
                                        new_item_data, exclude_known, k,
                                        diversity, random_seed)

    if cast_user_to_string_type:
        recs[user_id] = recs[user_id].astype(original_user_type)

    return recs
Recommend the ``k`` highest scored items for each user.

Parameters
----------
users : SArray, SFrame, or list, optional
    Users or observation queries for which to make recommendations. For list, SArray, and single-column inputs, this is simply a set of user IDs. By default, recommendations are returned for all users present when the model was trained. However, if the recommender model was created with additional features in the ``observation_data`` SFrame, then a corresponding SFrame of observation queries -- observation data without item or target columns -- can be passed to this method. For example, a model trained with user ID, item ID, time, and rating columns may be queried using an SFrame with user ID and time columns. In this case, the user ID column must be present, and all column names should match those in the ``observation_data`` SFrame passed to ``create``.

k : int, optional
    The number of recommendations to generate for each user.

items : SArray, SFrame, or list, optional
    Restricts the items from which recommendations can be made. If ``items`` is an SArray, list, or SFrame with a single column, only items from the given set will be recommended. This can be used, for example, to restrict the recommendations to items within a particular category or genre. If ``items`` is an SFrame with user ID and item ID columns, then the item restriction is specialized to each user. For example, if ``items`` contains 3 rows with user U1 -- (U1, I1), (U1, I2), and (U1, I3) -- then the recommendations for user U1 are chosen from items I1, I2, and I3. By default, recommendations are made from all items present when the model was trained.

new_observation_data : SFrame, optional
    ``new_observation_data`` gives additional observation data to the model, which may be used by the models to improve score and recommendation accuracy. Must be in the same format as the observation data passed to ``create``. How this data is used varies by model.

new_user_data : SFrame, optional
    ``new_user_data`` may give additional user data to the model. If present, scoring is done with reference to this new information. If there is any overlap with the side information present at training time, then this new side data is preferred. Must be in the same format as the user data passed to ``create``.

new_item_data : SFrame, optional
    ``new_item_data`` may give additional item data to the model. If present, scoring is done with reference to this new information. If there is any overlap with the side information present at training time, then this new side data is preferred. Must be in the same format as the item data passed to ``create``.

exclude : SFrame, optional
    An :class:`~turicreate.SFrame` of user / item pairs. The column names must be equal to the user and item columns of the main data, and it provides the model with user/item pairs to exclude from the recommendations. These user-item pairs are always excluded from the predictions, even if exclude_known is False.

exclude_known : bool, optional
    By default, all user-item interactions previously seen in the training data, or in any new data provided using ``new_observation_data``, are excluded from the recommendations. Passing in ``exclude_known = False`` overrides this behavior.

diversity : non-negative float, optional
    If given, then the recommend function attempts to choose a set of `k` items that are both highly scored and different from other items in that set. It does this by first retrieving ``k*(1+diversity)`` recommended items, then randomly choosing a diverse set from these items. Suggested values for diversity are between 1 and 3.

random_seed : int, optional
    If diversity is larger than 0, then some randomness is used; this controls the random seed to use for randomization. If None, it will be different each time.

verbose : bool, optional
    If True, print the progress of generating recommendations.

Returns
-------
out : SFrame
    An SFrame with the top ranked items for each user. The columns are: ``user_id``, ``item_id``, *score*, and *rank*, where ``user_id`` and ``item_id`` match the user and item column names specified at training time. The rank column is between 1 and ``k`` and gives the relative score of that item. The value of score depends on the method used for recommendations.

See Also
--------
recommend_from_interactions
predict
evaluate
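A short usage sketch of the method documented above. The training data, column names, and the ``turicreate.recommender.create`` call are illustrative assumptions; only the ``recommend`` parameters shown above are taken from the source.

# Hedged usage sketch; the SFrame contents and column names are made up for
# illustration.
import turicreate as tc

data = tc.SFrame({'user_id': ['u1', 'u1', 'u2'],
                  'item_id': ['i1', 'i2', 'i1'],
                  'rating': [5, 3, 4]})
model = tc.recommender.create(data, user_id='user_id', item_id='item_id',
                              target='rating')

# Top-5 recommendations for every user seen at training time.
top5 = model.recommend(k=5)

# Restrict the candidate items, ask for a more diverse set, and allow items
# the user has already rated to reappear.
top5_u1 = model.recommend(users=['u1'], k=5, items=['i1', 'i2'],
                          diversity=2, exclude_known=False)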
def get_collections_for_image(self, image_id):
    """Get identifiers of all collections that contain a given image.

    Parameters
    ----------
    image_id : string
        Unique identifier of image object

    Returns
    -------
    List(string)
        List of image collection identifiers
    """
    result = []
    # Get all active collections that contain the image identifier
    for document in self.collection.find({'active': True, 'images.identifier': image_id}):
        result.append(str(document['_id']))
    return result
Get identifiers of all collections that contain a given image.

Parameters
----------
image_id : string
    Unique identifier of image object

Returns
-------
List(string)
    List of image collection identifiers
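The lookup above is a plain MongoDB query over embedded image subdocuments. A minimal standalone sketch of the same idea with pymongo follows; the database and collection names are assumptions, and only the ``active`` flag and ``images.identifier`` path mirror the query in the source.

# Hedged sketch: database/collection names are hypothetical.
from pymongo import MongoClient

client = MongoClient()                                # assumes a local MongoDB instance
collections = client['imagedb']['collections']        # hypothetical names

def collections_for_image(image_id):
    # Match active collections whose embedded images array contains the identifier.
    cursor = collections.find({'active': True, 'images.identifier': image_id})
    return [str(doc['_id']) for doc in cursor]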
def update(self):
    """Update the data from the thermostat. Always sets the current time."""
    _LOGGER.debug("Querying the device..")
    time = datetime.now()
    value = struct.pack('BBBBBBB', PROP_INFO_QUERY,
                        time.year % 100, time.month, time.day,
                        time.hour, time.minute, time.second)
    self._conn.make_request(PROP_WRITE_HANDLE, value)
Update the data from the thermostat. Always sets the current time.
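The payload written by the update above is seven single bytes: a command byte followed by the current date and time. A small sketch of what that packing produces; the command value 0x03 is an assumption for illustration, and only the 'BBBBBBB' layout comes from the code above.

# Hedged sketch of the 7-byte payload layout; PROP_INFO_QUERY = 0x03 is assumed.
import struct
from datetime import datetime

PROP_INFO_QUERY = 0x03

now = datetime(2021, 6, 15, 9, 30, 5)
payload = struct.pack('BBBBBBB', PROP_INFO_QUERY,
                      now.year % 100, now.month, now.day,
                      now.hour, now.minute, now.second)
print(payload.hex())  # -> '0315060f091e05' (command, yy, mm, dd, hh, mm, ss)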
def run_processes(self,
                  procdetails: List[ProcessDetails],
                  subproc_run_timeout_sec: float = 1,
                  stop_event_timeout_ms: int = 1000,
                  kill_timeout_sec: float = 5) -> None:
    """
    Run multiple child processes.

    Args:
        procdetails: list of :class:`ProcessDetails` objects (q.v.)
        subproc_run_timeout_sec: time (in seconds) to wait for each process
            when polling child processes to see how they're getting on
            (default ``1``)
        stop_event_timeout_ms: time to wait (in ms) while checking the
            Windows stop event for this service (default ``1000``)
        kill_timeout_sec: how long (in seconds) will we wait for the
            subprocesses to end peacefully, before we try to kill them?

    .. todo::
        cardinal_pythonlib.winservice.WindowsService: NOT YET IMPLEMENTED:
        Windows service autorestart
    """
    # https://stackoverflow.com/questions/16333054
    def cleanup():
        self.debug("atexit function called: cleaning up")
        for pmgr_ in self.process_managers:
            pmgr_.stop()

    atexit.register(cleanup)

    # Set up process info
    self.process_managers = []  # type: List[ProcessManager]
    n = len(procdetails)
    for i, details in enumerate(procdetails):
        pmgr = ProcessManager(details, i + 1, n,
                              kill_timeout_sec=kill_timeout_sec,
                              debugging=self.debugging)
        self.process_managers.append(pmgr)

    # Start processes
    for pmgr in self.process_managers:
        pmgr.start()
    self.info("All started")

    # Run processes
    something_running = True
    stop_requested = False
    subproc_failed = False
    while something_running and not stop_requested and not subproc_failed:
        if (win32event.WaitForSingleObject(
                self.h_stop_event,
                stop_event_timeout_ms) == win32event.WAIT_OBJECT_0):
            stop_requested = True
            self.info("Stop requested; stopping")
        else:
            something_running = False
            for pmgr in self.process_managers:
                if subproc_failed:
                    break
                try:
                    retcode = pmgr.wait(timeout_s=subproc_run_timeout_sec)
                    if retcode != 0:
                        subproc_failed = True
                except subprocess.TimeoutExpired:
                    something_running = True

    # Kill any outstanding processes
    #
    # (a) Slow way
    # for pmgr in self.process_managers:
    #     pmgr.stop()
    #
    # (b) Faster (slightly more parallel) way
    # for pmgr in self.process_managers:
    #     pmgr.terminate()
    # for pmgr in self.process_managers:
    #     pmgr.stop_having_terminated()
    #
    # ... No, it's bad if we leave things orphaned.
    # Let's go for slow, clean code.

    for pmgr in self.process_managers:
        pmgr.stop()
    self.info("All stopped")
Run multiple child processes.

Args:
    procdetails: list of :class:`ProcessDetails` objects (q.v.)
    subproc_run_timeout_sec: time (in seconds) to wait for each process
        when polling child processes to see how they're getting on
        (default ``1``)
    stop_event_timeout_ms: time to wait (in ms) while checking the
        Windows stop event for this service (default ``1000``)
    kill_timeout_sec: how long (in seconds) will we wait for the
        subprocesses to end peacefully, before we try to kill them?

.. todo::
    cardinal_pythonlib.winservice.WindowsService: NOT YET IMPLEMENTED:
    Windows service autorestart
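The main loop above interleaves two waits: a short wait on the Windows stop event and a short poll of every child process, and it shuts everything down if either a stop is requested or any child exits with a non-zero code. A minimal standalone sketch of that polling pattern follows, using ``threading.Event`` as a stand-in for the Windows stop event and collapsing the two timeouts into one for brevity; it illustrates the idea, not the service framework's API.

# Hedged sketch of the supervise/poll/stop pattern; not the WindowsService API.
import subprocess
import threading

def supervise(commands, stop_event: threading.Event,
              poll_timeout_s: float = 1.0) -> None:
    # Start every child process.
    procs = [subprocess.Popen(cmd) for cmd in commands]
    try:
        while True:
            # Stand-in for WaitForSingleObject on the service stop event.
            if stop_event.wait(timeout=poll_timeout_s):
                break
            # Poll each child briefly; a non-zero exit code ends supervision.
            running = False
            for proc in procs:
                try:
                    if proc.wait(timeout=poll_timeout_s) != 0:
                        return
                except subprocess.TimeoutExpired:
                    running = True
            if not running:
                break
    finally:
        # Always try to shut the children down cleanly, then escalate.
        for proc in procs:
            proc.terminate()
        for proc in procs:
            try:
                proc.wait(timeout=5)
            except subprocess.TimeoutExpired:
                proc.kill()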