Columns: positive (string, lengths 100 to 30.3k), anchor (string, lengths 1 to 15k)
def vector_dot(vector1, vector2): """ Computes the dot-product of the input vectors. :param vector1: input vector 1 :type vector1: list, tuple :param vector2: input vector 2 :type vector2: list, tuple :return: result of the dot product :rtype: float """ try: if vector1 is None or len(vector1) == 0 or vector2 is None or len(vector2) == 0: raise ValueError("Input vectors cannot be empty") except TypeError as e: print("An error occurred: {}".format(e.args[-1])) raise TypeError("Input must be a list or tuple") except Exception: raise # Compute dot product prod = 0.0 for v1, v2 in zip(vector1, vector2): prod += v1 * v2 # Return the dot product of the input vectors return prod
Computes the dot-product of the input vectors. :param vector1: input vector 1 :type vector1: list, tuple :param vector2: input vector 2 :type vector2: list, tuple :return: result of the dot product :rtype: float
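A minimal usage sketch for the vector_dot pair above; the input vectors are invented for illustration and the function is assumed to be importable in the current scope.

v1 = (1.0, 2.0, 3.0)
v2 = (4.0, 5.0, 6.0)
result = vector_dot(v1, v2)  # 1*4 + 2*5 + 3*6
print(result)  # 32.0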
def connect(self, keyfile=None): """Connect to the node via ssh using the paramiko library. :return: :py:class:`paramiko.SSHClient` - ssh connection or None on failure """ ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) if keyfile and os.path.exists(keyfile): ssh.load_host_keys(keyfile) # Try connecting using the `preferred_ip`, if # present. Otherwise, try all of them and set `preferred_ip` # using the first that is working. ips = self.ips[:] # This is done in order to "sort" the IPs and put the preferred_ip first. if self.preferred_ip: if self.preferred_ip in ips: ips.remove(self.preferred_ip) else: # Preferred is changed? log.debug("IP %s does not seem to belong to %s anymore. Ignoring!", self.preferred_ip, self.name) self.preferred_ip = ips[0] for ip in itertools.chain([self.preferred_ip], ips): if not ip: continue try: log.debug("Trying to connect to host %s (%s)", self.name, ip) addr, port = parse_ip_address_and_port(ip, SSH_PORT) ssh.connect(str(addr), username=self.image_user, allow_agent=True, key_filename=self.user_key_private, timeout=Node.connection_timeout, port=port) log.debug("Connection to %s succeeded on port %d!", ip, port) if ip != self.preferred_ip: log.debug("Setting `preferred_ip` to %s", ip) self.preferred_ip = ip # Connection successful. return ssh except socket.error as ex: log.debug("Host %s (%s) not reachable: %s.", self.name, ip, ex) except paramiko.BadHostKeyException as ex: log.error("Invalid host key: host %s (%s); check keyfile: %s", self.name, ip, keyfile) except paramiko.SSHException as ex: log.debug("Ignoring error %s connecting to %s", str(ex), self.name) return None
Connect to the node via ssh using the paramiko library. :return: :py:class:`paramiko.SSHClient` - ssh connection or None on failure
def end(self, folder=None): """End the simulation and destroy the current simulation environment. """ ret = self.env.destroy(folder=folder) self._end_time = time.time() self._log(logging.DEBUG, "Simulation run with {} steps took {:.3f}s to" " complete, while actual processing time was {:.3f}s." .format(self.age, self._end_time - self._start_time, self._processing_time)) return ret
End the simulation and destroy the current simulation environment.
def marshall(self): """Return the measurement in the line protocol format. :rtype: str """ return '{},{} {} {}'.format( self._escape(self.name), ','.join(['{}={}'.format(self._escape(k), self._escape(v)) for k, v in self.tags.items()]), self._marshall_fields(), int(self.timestamp * 1000))
Return the measurement in the line protocol format. :rtype: str
def _download_mirbase(args, version="CURRENT"): """ Download files from mirbase """ if not args.hairpin or not args.mirna: logger.info("Working with version %s" % version) hairpin_fn = op.join(op.abspath(args.out), "hairpin.fa.gz") mirna_fn = op.join(op.abspath(args.out), "miRNA.str.gz") if not file_exists(hairpin_fn): cmd_h = "wget ftp://mirbase.org/pub/mirbase/%s/hairpin.fa.gz -O %s && gunzip -f !$" % (version, hairpin_fn) do.run(cmd_h, "download hairpin") if not file_exists(mirna_fn): cmd_m = "wget ftp://mirbase.org/pub/mirbase/%s/miRNA.str.gz -O %s && gunzip -f !$" % (version, mirna_fn) do.run(cmd_m, "download mirna") else: return args.hairpin, args.mirna
Download files from mirbase
def total_amount(qs) -> Total: """Sums the amounts of the objects in the queryset, keeping each currency separate. :param qs: A queryset containing objects that have an amount field of type Money. :return: A Total object. """ aggregate = qs.values('amount_currency').annotate(sum=Sum('amount')) return Total(Money(amount=r['sum'], currency=r['amount_currency']) for r in aggregate)
Sums the amounts of the objects in the queryset, keeping each currency separate. :param qs: A queryset containing objects that have an amount field of type Money. :return: A Total object.
def aa3_to_aa1(seq): """convert string of 3-letter amino acids to 1-letter amino acids >>> aa3_to_aa1("CysAlaThrSerAlaArgGluLeuAlaMetGlu") 'CATSARELAME' >>> aa3_to_aa1(None) """ if seq is None: return None return "".join(aa3_to_aa1_lut[aa3] for aa3 in [seq[i:i + 3] for i in range(0, len(seq), 3)])
convert string of 3-letter amino acids to 1-letter amino acids >>> aa3_to_aa1("CysAlaThrSerAlaArgGluLeuAlaMetGlu") 'CATSARELAME' >>> aa3_to_aa1(None)
def next(transport, wizard, step, data): """ Validate step and go to the next one (or finish the wizard) :param transport: Transport object :param wizard: Wizard block name :param step: Current step number :param data: form data for the step """ step = int(step) wizard = blocks.get(wizard) # Retrieve form block form = wizard.next(step) valid = forms.send(transport, form.register_name, data=data) if valid: if wizard.next(step+1) is None: # It was last step wizard.finish(transport) return # Next step wizard.step = step+1 wizard.update(transport)
Validate step and go to the next one (or finish the wizard) :param transport: Transport object :param wizard: Wizard block name :param step: Current step number :param data: form data for the step
def validate_line_list(dist, attr, value): """ Validate that the value is compatible """ # does not work as reliably in Python 2. if isinstance(value, str): value = value.split() value = list(value) try: check = (' '.join(value)).split() if check == value: return True except Exception: pass raise DistutilsSetupError("%r must be a list of valid identifiers" % attr)
Validate that the value is compatible
def modify_url_for_impersonation(cls, url, impersonate_user, username): """ Modify the SQL Alchemy URL object with the user to impersonate if applicable. :param url: SQLAlchemy URL object :param impersonate_user: Bool indicating if impersonation is enabled :param username: Effective username """ if impersonate_user is not None and username is not None: url.username = username
Modify the SQL Alchemy URL object with the user to impersonate if applicable. :param url: SQLAlchemy URL object :param impersonate_user: Bool indicating if impersonation is enabled :param username: Effective username
def Detect(self, baseline, host_data): """Run host_data through detectors and return them if a detector triggers. Args: baseline: The base set of rdf values used to evaluate whether an issue exists. host_data: The rdf values passed back by the filters. Returns: A CheckResult message containing anomalies if any detectors identified an issue, None otherwise. """ result = CheckResult() for detector in self.detectors: finding = detector(baseline, host_data) if finding: result.ExtendAnomalies([finding]) if result: return result
Run host_data through detectors and return them if a detector triggers. Args: baseline: The base set of rdf values used to evaluate whether an issue exists. host_data: The rdf values passed back by the filters. Returns: A CheckResult message containing anomalies if any detectors identified an issue, None otherwise.
def get_batch(self): """Returns the Batch """ context = self.context parent = api.get_parent(context) if context.portal_type == "Batch": return context elif parent.portal_type == "Batch": return parent return None
Returns the Batch
def generate_map(map, name='url_map'): """ Generates a JavaScript function containing the rules defined in this map, to be used with a MapAdapter's generate_javascript method. If you don't pass a name the returned JavaScript code is an expression that returns a function. Otherwise it's a standalone script that assigns the function with that name. Dotted names are resolved (so you an use a name like 'obj.url_for') In order to use JavaScript generation, simplejson must be installed. Note that using this feature will expose the rules defined in your map to users. If your rules contain sensitive information, don't use JavaScript generation! """ from warnings import warn warn(DeprecationWarning('This module is deprecated')) map.update() rules = [] converters = [] for rule in map.iter_rules(): trace = [{ 'is_dynamic': is_dynamic, 'data': data } for is_dynamic, data in rule._trace] rule_converters = {} for key, converter in iteritems(rule._converters): js_func = js_to_url_function(converter) try: index = converters.index(js_func) except ValueError: converters.append(js_func) index = len(converters) - 1 rule_converters[key] = index rules.append({ u'endpoint': rule.endpoint, u'arguments': list(rule.arguments), u'converters': rule_converters, u'trace': trace, u'defaults': rule.defaults }) return render_template(name_parts=name and name.split('.') or [], rules=dumps(rules), converters=converters)
Generates a JavaScript function containing the rules defined in this map, to be used with a MapAdapter's generate_javascript method. If you don't pass a name the returned JavaScript code is an expression that returns a function. Otherwise it's a standalone script that assigns the function with that name. Dotted names are resolved (so you an use a name like 'obj.url_for') In order to use JavaScript generation, simplejson must be installed. Note that using this feature will expose the rules defined in your map to users. If your rules contain sensitive information, don't use JavaScript generation!
def known_dists(): '''Return a list of all Distributions exporting udata.* entrypoints''' return ( dist for dist in pkg_resources.working_set if any(k in ENTRYPOINTS for k in dist.get_entry_map().keys()) )
Return a list of all Distributions exporting udata.* entrypoints
def modify_cache_parameter_group(name, region=None, key=None, keyid=None, profile=None, **args): ''' Update a cache parameter group in place. Note that due to a design limitation in AWS, this function is not atomic -- a maximum of 20 params may be modified in one underlying boto call. This means that if more than 20 params need to be changed, the update is performed in blocks of 20, which in turns means that if a later sub-call fails after an earlier one has succeeded, the overall update will be left partially applied. CacheParameterGroupName The name of the cache parameter group to modify. ParameterNameValues A [list] of {dicts}, each composed of a parameter name and a value, for the parameter update. At least one parameter/value pair is required. .. code-block:: yaml ParameterNameValues: - ParameterName: timeout # Amazon requires ALL VALUES to be strings... ParameterValue: "30" - ParameterName: appendonly # The YAML parser will turn a bare `yes` into a bool, which Amazon will then throw on... ParameterValue: "yes" Example: .. code-block:: bash salt myminion boto3_elasticache.modify_cache_parameter_group \ CacheParameterGroupName=myParamGroup \ ParameterNameValues='[ { ParameterName: timeout, ParameterValue: "30" }, { ParameterName: appendonly, ParameterValue: "yes" } ]' ''' args = dict([(k, v) for k, v in args.items() if not k.startswith('_')]) try: Params = args['ParameterNameValues'] except ValueError as e: raise SaltInvocationError('Invalid `ParameterNameValues` structure passed.') while Params: args.update({'ParameterNameValues': Params[:20]}) Params = Params[20:] if not _modify_resource(name, name_param='CacheParameterGroupName', desc='cache parameter group', res_type='cache_parameter_group', region=region, key=key, keyid=keyid, profile=profile, **args): return False return True
Update a cache parameter group in place. Note that due to a design limitation in AWS, this function is not atomic -- a maximum of 20 params may be modified in one underlying boto call. This means that if more than 20 params need to be changed, the update is performed in blocks of 20, which in turns means that if a later sub-call fails after an earlier one has succeeded, the overall update will be left partially applied. CacheParameterGroupName The name of the cache parameter group to modify. ParameterNameValues A [list] of {dicts}, each composed of a parameter name and a value, for the parameter update. At least one parameter/value pair is required. .. code-block:: yaml ParameterNameValues: - ParameterName: timeout # Amazon requires ALL VALUES to be strings... ParameterValue: "30" - ParameterName: appendonly # The YAML parser will turn a bare `yes` into a bool, which Amazon will then throw on... ParameterValue: "yes" Example: .. code-block:: bash salt myminion boto3_elasticache.modify_cache_parameter_group \ CacheParameterGroupName=myParamGroup \ ParameterNameValues='[ { ParameterName: timeout, ParameterValue: "30" }, { ParameterName: appendonly, ParameterValue: "yes" } ]'
def release(): "check release before upload to PyPI" sh("paver bdist_wheel") wheels = path("dist").files("*.whl") if not wheels: error("\n*** ERROR: No release wheel was built!") sys.exit(1) if any(".dev" in i for i in wheels): error("\n*** ERROR: You're still using a 'dev' version!") sys.exit(1) # Check that source distribution can be built and is complete print print "~~~ TESTING SOURCE BUILD".ljust(78, '~') sh( "{ command cd dist/ && unzip -q %s-%s.zip && command cd %s-%s/" " && /usr/bin/python setup.py sdist >/dev/null" " && if { unzip -ql ../%s-%s.zip; unzip -ql dist/%s-%s.zip; }" " | cut -b26- | sort | uniq -c| egrep -v '^ +2 +' ; then" " echo '^^^ Difference in file lists! ^^^'; false;" " else true; fi; } 2>&1" % tuple([project["name"], version] * 4) ) path("dist/%s-%s" % (project["name"], version)).rmtree() print "~" * 78 print print "~~~ sdist vs. git ".ljust(78, '~') subprocess.call( "unzip -v dist/pyrocore-*.zip | egrep '^ .+/' | cut -f2- -d/ | sort >./build/ls-sdist.txt" " && git ls-files | sort >./build/ls-git.txt" " && $(which colordiff || echo diff) -U0 ./build/ls-sdist.txt ./build/ls-git.txt || true", shell=True) print "~" * 78 print print "Created", " ".join([str(i) for i in path("dist").listdir()]) print "Use 'paver sdist bdist_wheel' to build the release and" print " 'twine upload dist/*.{zip,whl}' to upload to PyPI" print "Use 'paver dist_docs' to prepare an API documentation upload"
check release before upload to PyPI
def extract_vars(template): """ Extract variables from template. Variables are enclosed in double curly braces. """ keys = set() for match in re.finditer(r"\{\{ (?P<key>\w+) \}\}", template.getvalue()): keys.add(match.groups()[0]) return sorted(list(keys))
Extract variables from template. Variables are enclosed in double curly braces.
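The extractor above calls template.getvalue(), so it expects a file-like buffer rather than a plain string; a small sketch using io.StringIO, with the template text invented for illustration:

import io

template = io.StringIO("Hello {{ name }}, order {{ order_id }} shipped to {{ name }}.")
print(extract_vars(template))  # ['name', 'order_id'] (duplicates collapsed, keys sorted)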
def get_interface_switchport_output_switchport_acceptable_frame_type(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_interface_switchport = ET.Element("get_interface_switchport") config = get_interface_switchport output = ET.SubElement(get_interface_switchport, "output") switchport = ET.SubElement(output, "switchport") interface_type_key = ET.SubElement(switchport, "interface-type") interface_type_key.text = kwargs.pop('interface_type') interface_name_key = ET.SubElement(switchport, "interface-name") interface_name_key.text = kwargs.pop('interface_name') acceptable_frame_type = ET.SubElement(switchport, "acceptable-frame-type") acceptable_frame_type.text = kwargs.pop('acceptable_frame_type') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def upper_diag_self_prodx(list_): """ upper diagonal of the Cartesian product of self and self. Weird name. fixme Args: list_ (list): Returns: list: CommandLine: python -m utool.util_alg --exec-upper_diag_self_prodx Example: >>> # ENABLE_DOCTEST >>> from utool.util_alg import * # NOQA >>> list_ = [1, 2, 3] >>> result = upper_diag_self_prodx(list_) >>> print(result) [(1, 2), (1, 3), (2, 3)] """ return [(item1, item2) for n1, item1 in enumerate(list_) for n2, item2 in enumerate(list_) if n1 < n2]
upper diagonal of the Cartesian product of self and self. Weird name. fixme Args: list_ (list): Returns: list: CommandLine: python -m utool.util_alg --exec-upper_diag_self_prodx Example: >>> # ENABLE_DOCTEST >>> from utool.util_alg import * # NOQA >>> list_ = [1, 2, 3] >>> result = upper_diag_self_prodx(list_) >>> print(result) [(1, 2), (1, 3), (2, 3)]
def nl_complete_msg(sk, msg): """Finalize Netlink message. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L450 This function finalizes a Netlink message by completing the message with desirable flags and values depending on the socket configuration. - If not yet filled out, the source address of the message (`nlmsg_pid`) will be set to the local port number of the socket. - If not yet specified, the next available sequence number is assigned to the message (`nlmsg_seq`). - If not yet specified, the protocol field of the message will be set to the protocol field of the socket. - The `NLM_F_REQUEST` Netlink message flag will be set. - The `NLM_F_ACK` flag will be set if Auto-ACK mode is enabled on the socket. Positional arguments: sk -- Netlink socket (nl_sock class instance). msg -- Netlink message (nl_msg class instance). """ nlh = msg.nm_nlh if nlh.nlmsg_pid == NL_AUTO_PORT: nlh.nlmsg_pid = nl_socket_get_local_port(sk) if nlh.nlmsg_seq == NL_AUTO_SEQ: nlh.nlmsg_seq = sk.s_seq_next sk.s_seq_next += 1 if msg.nm_protocol == -1: msg.nm_protocol = sk.s_proto nlh.nlmsg_flags |= NLM_F_REQUEST if not sk.s_flags & NL_NO_AUTO_ACK: nlh.nlmsg_flags |= NLM_F_ACK
Finalize Netlink message. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L450 This function finalizes a Netlink message by completing the message with desirable flags and values depending on the socket configuration. - If not yet filled out, the source address of the message (`nlmsg_pid`) will be set to the local port number of the socket. - If not yet specified, the next available sequence number is assigned to the message (`nlmsg_seq`). - If not yet specified, the protocol field of the message will be set to the protocol field of the socket. - The `NLM_F_REQUEST` Netlink message flag will be set. - The `NLM_F_ACK` flag will be set if Auto-ACK mode is enabled on the socket. Positional arguments: sk -- Netlink socket (nl_sock class instance). msg -- Netlink message (nl_msg class instance).
def update_user(self, user_id, **kwargs): """Update a user.""" body = self._formdata(kwargs, FastlyUser.FIELDS) content = self._fetch("/user/%s" % user_id, method="PUT", body=body) return FastlyUser(self, content)
Update a user.
def make_psf_kernel(psf, npix, cdelt, xpix, ypix, psf_scale_fn=None, normalize=False): """ Generate a kernel for a point-source. Parameters ---------- psf : `~fermipy.irfs.PSFModel` npix : int Number of pixels in X and Y dimensions. cdelt : float Pixel size in degrees. """ egy = psf.energies x = make_pixel_distance(npix, xpix, ypix) x *= cdelt k = np.zeros((len(egy), npix, npix)) for i in range(len(egy)): k[i] = psf.eval(i, x, scale_fn=psf_scale_fn) if normalize: k /= (np.sum(k, axis=0)[np.newaxis, ...] * np.radians(cdelt) ** 2) return k
Generate a kernel for a point-source. Parameters ---------- psf : `~fermipy.irfs.PSFModel` npix : int Number of pixels in X and Y dimensions. cdelt : float Pixel size in degrees.
def _bundleable(desc): """Creates a function that transforms an API call into a bundling call. It transform a_func from an API call that receives the requests and returns the response into a callable that receives the same request, and returns a :class:`bundling.Event`. The returned Event object can be used to obtain the eventual result of the bundled call. Args: desc (gax.BundleDescriptor): describes the bundling that a_func supports. Returns: Callable: takes the API call's request and keyword args and returns a bundling.Event object. """ def inner(a_func, settings, request, **kwargs): """Schedules execution of a bundling task.""" if not settings.bundler: return a_func(request, **kwargs) the_id = bundling.compute_bundle_id( request, desc.request_discriminator_fields) return settings.bundler.schedule(a_func, the_id, desc, request, kwargs) return inner
Creates a function that transforms an API call into a bundling call. It transform a_func from an API call that receives the requests and returns the response into a callable that receives the same request, and returns a :class:`bundling.Event`. The returned Event object can be used to obtain the eventual result of the bundled call. Args: desc (gax.BundleDescriptor): describes the bundling that a_func supports. Returns: Callable: takes the API call's request and keyword args and returns a bundling.Event object.
def getStickXY(TableName): """ Get X and Y for fine plotting of a stick spectrum. Usage: X,Y = getStickXY(TableName). """ cent,intens = getColumns(TableName,('nu','sw')) n = len(cent) cent_ = zeros(n*3) intens_ = zeros(n*3) for i in range(n): intens_[3*i] = 0 intens_[3*i+1] = intens[i] intens_[3*i+2] = 0 cent_[(3*i):(3*i+3)] = cent[i] return cent_,intens_
Get X and Y for fine plotting of a stick spectrum. Usage: X,Y = getStickXY(TableName).
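A brief usage sketch for getStickXY: because each line centre appears three times with zero intensity on either side, an ordinary line plot renders the spectrum as vertical sticks. The table name is hypothetical and assumes HITRAN data has already been fetched into a local HAPI table.

import matplotlib.pyplot as plt

X, Y = getStickXY('H2O_sample')  # 'H2O_sample' is an assumed table name
plt.plot(X, Y)                   # zero-intensity points bracket each line centre
plt.show()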
def sinwave(n=4,inc=.25): """ Returns a DataFrame with the required format for a surface (sine wave) plot Parameters: ----------- n : int Ranges for X and Y axis (-n,n) n_y : int Size of increment along the axis """ x=np.arange(-n,n,inc) y=np.arange(-n,n,inc) X,Y=np.meshgrid(x,y) R = np.sqrt(X**2 + Y**2) Z = np.sin(R)/(.5*R) return pd.DataFrame(Z,index=x,columns=y)
Returns a DataFrame with the required format for a surface (sine wave) plot Parameters: ----------- n : int Ranges for X and Y axis (-n,n) n_y : int Size of increment along the axis
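A short usage sketch for sinwave; only the DataFrame construction above is assumed, and the shape check follows directly from np.arange(-n, n, inc):

df = sinwave(n=4, inc=0.25)
print(df.shape)      # (32, 32): one row and one column per 0.25 step over [-4, 4)
print(df.index[:3])  # the x coordinates are reused as index and columns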
def _oversized_subqueries( self, coordinate, radiusArcsec): """ *subdivide oversized query* **Key Arguments:** # - **Return:** - None .. todo:: - @review: when complete, clean _oversized_subqueries method - @review: when complete add logging """ self.log.info('starting the ``_oversized_subqueries`` method') import math smallerRadiusArcsec = radiusArcsec / 2. print "Calculating 7 sub-disks for coordinates %(coordinate)s, with smaller search radius of %(smallerRadiusArcsec)s arcsec" % locals() ra = coordinate[0] dec = coordinate[1] shifts = [ (0, 0), (0, math.sqrt(3.) / 2.), (3. / 4., math.sqrt(3.) / 4.), (3. / 4., -math.sqrt(3.) / 4.), (0, -math.sqrt(3.) / 2.), (-3. / 4., -math.sqrt(3.) / 4.), (-3. / 4., math.sqrt(3.) / 4.) ] subDiskCoordinates = [] count = 0 for s in shifts: x1 = ra + s[0] * radiusArcsec / (60 * 60) y1 = dec + s[1] * radiusArcsec / (60 * 60) subDiskCoordinates.append((x1, y1)) names, searchParams = self.get_crossmatch_names( listOfCoordinates=subDiskCoordinates, radiusArcsec=smallerRadiusArcsec ) self.log.info('completed the ``_oversized_subqueries`` method') return names, searchParams
*subdivide oversized query* **Key Arguments:** # - **Return:** - None .. todo:: - @review: when complete, clean _oversized_subqueries method - @review: when complete add logging
def estimate_frequency_for_zero(self, sample_rate: float, nbits=42) -> float: """ Calculates the frequency of at most nbits logical zeros and returns the mean of these frequencies :param nbits: :return: """ return self.__estimate_frequency_for_bit(False, sample_rate, nbits)
Calculates the frequency of at most nbits logical zeros and returns the mean of these frequencies :param nbits: :return:
def all_devices(cl_device_type=None, platform=None): """Get multiple device environments, optionally only of the indicated type. This will only fetch devices that support double point precision. Args: cl_device_type (cl.device_type.* or string): The type of the device we want, can be a opencl device type or a string matching 'GPU' or 'CPU'. platform (opencl platform): The opencl platform to select the devices from Returns: list of CLEnvironment: List with the CL device environments. """ if isinstance(cl_device_type, str): cl_device_type = device_type_from_string(cl_device_type) runtime_list = [] if platform is None: platforms = cl.get_platforms() else: platforms = [platform] for platform in platforms: if cl_device_type: devices = platform.get_devices(device_type=cl_device_type) else: devices = platform.get_devices() for device in devices: if device_supports_double(device): env = CLEnvironment(platform, device) runtime_list.append(env) return runtime_list
Get multiple device environments, optionally only of the indicated type. This will only fetch devices that support double point precision. Args: cl_device_type (cl.device_type.* or string): The type of the device we want, can be a opencl device type or a string matching 'GPU' or 'CPU'. platform (opencl platform): The opencl platform to select the devices from Returns: list of CLEnvironment: List with the CL device environments.
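A minimal sketch of calling all_devices; it only exercises the function shown above, but it still needs pyopencl plus at least one double-precision capable device, so treat it as illustrative rather than guaranteed output.

gpu_envs = all_devices('GPU')   # the string form is converted via device_type_from_string
if not gpu_envs:
    gpu_envs = all_devices()    # fall back to any double-precision device
for env in gpu_envs:
    print(env)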
def read_umi_tools(filename: PathLike, dtype: str='float32') -> AnnData: """Read a gzipped condensed count matrix from umi_tools. Parameters ---------- filename File name to read from. """ # import pandas for conversion of a dict of dicts into a matrix # import gzip to read a gzipped file :-) import gzip from pandas import DataFrame dod = {} # this will contain basically everything fh = gzip.open(fspath(filename)) header = fh.readline() # read the first line for line in fh: t = line.decode('ascii').split('\t') # gzip read bytes, hence the decoding try: dod[t[1]].update({t[0]:int(t[2])}) except KeyError: dod[t[1]] = {t[0]:int(t[2])} df = DataFrame.from_dict(dod, orient='index') # build the matrix df.fillna(value=0., inplace=True) # many NaN, replace with zeros return AnnData(np.array(df), {'obs_names': df.index}, {'var_names': df.columns}, dtype=dtype)
Read a gzipped condensed count matrix from umi_tools. Parameters ---------- filename File name to read from.
def update(self, callback_method=values.unset, callback_url=values.unset, friendly_name=values.unset): """ Update the TriggerInstance :param unicode callback_method: The HTTP method to use to call callback_url :param unicode callback_url: The URL we call when the trigger fires :param unicode friendly_name: A string to describe the resource :returns: Updated TriggerInstance :rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance """ return self._proxy.update( callback_method=callback_method, callback_url=callback_url, friendly_name=friendly_name, )
Update the TriggerInstance :param unicode callback_method: The HTTP method to use to call callback_url :param unicode callback_url: The URL we call when the trigger fires :param unicode friendly_name: A string to describe the resource :returns: Updated TriggerInstance :rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
def _python_to_mod_new(changes: Changeset) -> Dict[str, List[List[bytes]]]: """ Convert a LdapChanges object to a modlist for add operation. """ table: LdapObjectClass = type(changes.src) fields = table.get_fields() result: Dict[str, List[List[bytes]]] = {} for name, field in fields.items(): if field.db_field: try: value = field.to_db(changes.get_value_as_list(name)) if len(value) > 0: result[name] = value except ValidationError as e: raise ValidationError(f"{name}: {e}.") return result
Convert a LdapChanges object to a modlist for add operation.
def pow(self, other, axis="columns", level=None, fill_value=None): """Pow this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the pow against this. axis: The axis to pow over. level: The Multilevel index level to apply pow over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Pow applied. """ return self._binary_op( "pow", other, axis=axis, level=level, fill_value=fill_value )
Pow this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the pow against this. axis: The axis to pow over. level: The Multilevel index level to apply pow over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Pow applied.
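A sketch of the element-wise behaviour using plain pandas as a stand-in, since the method above belongs to a DataFrame wrapper that mirrors the pandas API (e.g. Modin); the frame contents are invented:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
print(df.pow(2))                 # squares every element
print(df.pow(df["a"], axis=0))   # raises each row to the power given by column a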
def set_wd_noise(self, wd_noise): """Add White Dwarf Background Noise This adds the White Dwarf (WD) Background noise. This can either do calculations with, without, or with and without WD noise. Args: wd_noise (bool or str, optional): Add or remove WD background noise. First option is to have only calculations with the wd_noise. For this, use `yes` or True. Second option is no WD noise. For this, use `no` or False. For both calculations with and without WD noise, use `both`. Raises: ValueError: Input value is not one of the options. """ if isinstance(wd_noise, bool): wd_noise = str(wd_noise) if wd_noise.lower() == 'yes' or wd_noise.lower() == 'true': wd_noise = 'True' elif wd_noise.lower() == 'no' or wd_noise.lower() == 'false': wd_noise = 'False' elif wd_noise.lower() == 'both': wd_noise = 'Both' else: raise ValueError('wd_noise must be yes, no, True, False, or Both.') self.sensitivity_input.add_wd_noise = wd_noise return
Add White Dwarf Background Noise This adds the White Dwarf (WD) Background noise. This can either do calculations with, without, or with and without WD noise. Args: wd_noise (bool or str, optional): Add or remove WD background noise. First option is to have only calculations with the wd_noise. For this, use `yes` or True. Second option is no WD noise. For this, use `no` or False. For both calculations with and without WD noise, use `both`. Raises: ValueError: Input value is not one of the options.
def populate(self, **values): """Populate values to fields. Skip non-existing.""" values = values.copy() fields = list(self.iterate_with_name()) for _, structure_name, field in fields: if structure_name in values: field.__set__(self, values.pop(structure_name)) for name, _, field in fields: if name in values: field.__set__(self, values.pop(name))
Populate values to fields. Skip non-existing.
def __reset_unique_identities(self): """Clear identities relationships and enrollments data""" self.log("Resetting unique identities...") self.log("Clearing identities relationships") nids = 0 uidentities = api.unique_identities(self.db) for uidentity in uidentities: for identity in uidentity.identities: api.move_identity(self.db, identity.id, identity.id) nids += 1 self.log("Relationships cleared for %s identities" % nids) self.log("Clearing enrollments") with self.db.connect() as session: enrollments = session.query(Enrollment).all() for enr in enrollments: session.delete(enr) self.log("Enrollments cleared")
Clear identities relationships and enrollments data
def get_all(kind='2'): ''' Get All the records. ''' return TabPost.select().where( (TabPost.kind == kind) & (TabPost.valid == 1) ).order_by( TabPost.time_update.desc() )
Get All the records.
def remove_entry(self, fs_entry): """Removes an FSEntry object from this METS document. Any children of this FSEntry will also be removed. This will be removed as a child of its parent, if any. :param metsrw.mets.FSEntry fs_entry: FSEntry to remove from the METS """ try: self._root_elements.remove(fs_entry) except ValueError: # fs_entry may not be in the root elements pass if fs_entry.parent: fs_entry.parent.remove_child(fs_entry) # Reset file lists so they get regenerated without the removed file(s) self._all_files = None
Removes an FSEntry object from this METS document. Any children of this FSEntry will also be removed. This will be removed as a child of its parent, if any. :param metsrw.mets.FSEntry fs_entry: FSEntry to remove from the METS
def get(self, id, domain='messages'): """ Gets a message translation. @rtype: str @return: The message translation """ assert isinstance(id, (str, unicode)) assert isinstance(domain, (str, unicode)) if self.defines(id, domain): return self.messages[domain][id] if self.fallback_catalogue is not None: return self.fallback_catalogue.get(id, domain) return id
Gets a message translation. @rtype: str @return: The message translation
def is_connected(self, use_cached=True): """Return True if the device is currently connected and False if not""" device_json = self.get_device_json(use_cached) return int(device_json.get("dpConnectionStatus")) > 0
Return True if the device is currently connected and False if not
def _check_age(self, pub, min_interval=timedelta(seconds=0)): """Check the age of the receiver. """ now = datetime.utcnow() if (now - self._last_age_check) <= min_interval: return LOGGER.debug("%s - checking addresses", str(datetime.utcnow())) self._last_age_check = now to_del = [] with self._address_lock: for addr, metadata in self._addresses.items(): atime = metadata["receive_time"] if now - atime > self._max_age: mda = {'status': False, 'URI': addr, 'service': metadata['service']} msg = Message('/address/' + metadata['name'], 'info', mda) to_del.append(addr) LOGGER.info("publish remove '%s'", str(msg)) pub.send(msg.encode()) for addr in to_del: del self._addresses[addr]
Check the age of the receiver.
def _serialize(self, value, attr, obj): """Convert the Arrow object into a string.""" if isinstance(value, arrow.arrow.Arrow): value = value.datetime return super(ArrowField, self)._serialize(value, attr, obj)
Convert the Arrow object into a string.
def create_image(self, image_file, caption): """ Create an image with a caption """ suffix = 'png' if image_file: img = Image.open(os.path.join(self.gallery, image_file)) width, height = img.size ratio = width/WIDTH img = img.resize((int(width // ratio), int(height // ratio)), Image.ANTIALIAS) else: img = Image.new('RGB', (WIDTH, HEIGHT), 'black') image = self.add_caption(img, caption) return image
Create an image with a caption
def requestAccountUpdates(self, subscribe=True): """ Register to account updates https://www.interactivebrokers.com/en/software/api/apiguide/java/reqaccountupdates.htm """ if self.subscribeAccount != subscribe: self.subscribeAccount = subscribe self.ibConn.reqAccountUpdates(subscribe, 0)
Register to account updates https://www.interactivebrokers.com/en/software/api/apiguide/java/reqaccountupdates.htm
def feedback_results_to_measurements_frame(feedback_result): ''' Extract measured data from `FeedbackResults` instance into `pandas.DataFrame`. ''' index = pd.Index(feedback_result.time * 1e-3, name='seconds') df_feedback = pd.DataFrame(np.column_stack([feedback_result.V_fb, feedback_result.V_hv, feedback_result.fb_resistor, feedback_result.hv_resistor]), columns=['V_fb', 'V_hv', 'fb_resistor', 'hv_resistor'], index=index) df_feedback.insert(0, 'frequency', feedback_result.frequency) return df_feedback
Extract measured data from `FeedbackResults` instance into `pandas.DataFrame`.
def check_acknowledgment(self, ds): ''' Check if acknowledgment/acknowledgement attribute is present. Because acknowledgement has its own check, we are keeping it out of the Global Attributes (even though it is a Global Attr). :param netCDF4.Dataset ds: An open netCDF dataset ''' check = False messages = [] if hasattr(ds, 'acknowledgment') or hasattr(ds, 'acknowledgement'): check = True else: messages.append("acknowledgment/acknowledgement not present") # name="Global Attributes" so gets grouped with Global Attributes return Result(BaseCheck.MEDIUM, check, "Global Attributes", msgs=messages)
Check if acknowledgment/acknowledgement attribute is present. Because acknowledgement has its own check, we are keeping it out of the Global Attributes (even though it is a Global Attr). :param netCDF4.Dataset ds: An open netCDF dataset
def get_hex_color_range(start_color, end_color, quantity): """ Generates a list of quantity Hex colors from start_color to end_color. :param start_color: Hex or plain English color for start of range :param end_color: Hex or plain English color for end of range :param quantity: Number of colours to return :return: A list of Hex color values """ raw_colors = [c.hex for c in list(Color(start_color).range_to(Color(end_color), quantity))] colors = [] for color in raw_colors: # i3bar expects the full Hex value but for some colors the colour # module only returns partial values. So we need to convert these colors to the full # Hex value. if len(color) == 4: fixed_color = "#" for c in color[1:]: fixed_color += c * 2 colors.append(fixed_color) else: colors.append(color) return colors
Generates a list of quantity Hex colors from start_color to end_color. :param start_color: Hex or plain English color for start of range :param end_color: Hex or plain English color for end of range :param quantity: Number of colours to return :return: A list of Hex color values
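A minimal usage sketch; it assumes the colour package (which supplies the Color.range_to call used above) is installed:

palette = get_hex_color_range("red", "blue", 5)
print(palette)  # five '#rrggbb' strings interpolating from red to blue, each padded to a full six-digit hex value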
def start(self, activity, action): ''' Mark an action as started :param activity: The virtualenv activity name :type activity: ``str`` :param action: The virtualenv action :type action: :class:`tox.session.Action` ''' try: self._start_action(activity, action) except ValueError: retox_log.debug("Could not find action %s in env %s" % (activity, self.name)) self.refresh()
Mark an action as started :param activity: The virtualenv activity name :type activity: ``str`` :param action: The virtualenv action :type action: :class:`tox.session.Action`
def _get_index_nd(self, key): """Returns an index array for use in scatter_nd and gather_nd.""" def _is_advanced_index(index): """The definition of advanced index here includes integers as well, while integers are considered as basic index type when the key contains only slices and integers.""" return not isinstance(index, py_slice) if isinstance(key, (NDArray, np.ndarray, list, integer_types, py_slice)): key = (key,) assert isinstance(key, tuple),\ 'index=%s must be a NDArray, or np.ndarray, or list, or tuple ' \ ' type to use advanced indexing, received type=%s' % (str(key), str(type(key))) assert len(key) > 0, "Cannot slice with empty indices" shape = self.shape assert len(shape) >= len(key),\ "Slicing dimensions exceeds array dimensions, %d vs %d" % (len(key), len(shape)) indices = [] dtype = 'int32' # index data type passed to gather_nd op need_broadcast = (len(key) != 1) advanced_indices = [] # include list, NDArray, np.ndarray, integer basic_indices = [] # include only slices advanced_index_bshape = None # final advanced index shape for i, idx_i in enumerate(key): is_advanced_index = True if isinstance(idx_i, (np.ndarray, list, tuple)): idx_i = array(idx_i, ctx=self.context, dtype=dtype) advanced_indices.append(i) elif isinstance(idx_i, py_slice): start, stop, step = _get_index_range(idx_i.start, idx_i.stop, shape[i], idx_i.step) idx_i = arange(start, stop, step, ctx=self.context, dtype=dtype) basic_indices.append(i) is_advanced_index = False elif isinstance(idx_i, integer_types): start, stop, step = _get_index_range(idx_i, idx_i+1, shape[i], 1) idx_i = arange(start, stop, step, ctx=self.context, dtype=dtype) advanced_indices.append(i) elif isinstance(idx_i, NDArray): if dtype != idx_i.dtype: idx_i = idx_i.astype(dtype) advanced_indices.append(i) else: raise IndexError('Indexing NDArray with index=%s of type=%s is not supported' % (str(key), str(type(key)))) if is_advanced_index: if advanced_index_bshape is None: advanced_index_bshape = idx_i.shape elif advanced_index_bshape != idx_i.shape: need_broadcast = True advanced_index_bshape = _get_broadcast_shape(advanced_index_bshape, idx_i.shape) indices.append(idx_i) # Get final index shape for gather_nd. See the following reference # for determining the output array shape. 
# https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.indexing.html#combining-advanced-and-basic-indexing # pylint: disable=line-too-long if len(advanced_indices) == 0: raise ValueError('Advanced index tuple must contain at least one of the following types:' ' list, tuple, NDArray, np.ndarray, integer, received index=%s' % key) # determine the output array's shape by checking whether advanced_indices are all adjacent # or separated by slices advanced_indices_adjacent = True for i in range(0, len(advanced_indices)-1): if advanced_indices[i] + 1 != advanced_indices[i+1]: advanced_indices_adjacent = False break index_bshape_list = [] # index broadcasted shape if advanced_indices_adjacent: for i in range(0, advanced_indices[0]): index_bshape_list.extend(indices[i].shape) if not need_broadcast and indices[i].shape != advanced_index_bshape: need_broadcast = True index_bshape_list.extend(advanced_index_bshape) for i in range(advanced_indices[-1]+1, len(indices)): if not need_broadcast and indices[i].shape != advanced_index_bshape: need_broadcast = True index_bshape_list.extend(indices[i].shape) else: index_bshape_list.extend(advanced_index_bshape) for i in basic_indices: index_bshape_list.extend(indices[i].shape) if not need_broadcast and indices[i].shape != advanced_index_bshape: need_broadcast = True index_bshape = tuple(index_bshape_list) # Need to broadcast all ndarrays in indices to the final shape. # For example, suppose an array has shape=(5, 6, 7, 8) and # key=(slice(1, 5), [[1, 2]], slice(2, 5), [1]). # Since key[1] and key[3] are two advanced indices here and they are # separated by basic indices key[0] and key[2], the output shape # is (1, 2, 4, 3), where the first two elements come from the shape # that key[1] and key[3] should broadcast to, which is (1, 2), and # the last two elements come from the shape of two basic indices. # In order to broadcast all basic and advanced indices to the output shape, # we need to reshape them based on their axis. For example, to broadcast key[0], # with shape=(4,), we first need to reshape it into (1, 1, 4, 1), and then # broadcast the reshaped array to (1, 2, 4, 3); to broadcast key[1], we first # reshape it into (1, 2, 1, 1), then broadcast the reshaped array to (1, 2, 4, 3). 
if need_broadcast: broadcasted_indices = [] idx_rshape = [1] * len(index_bshape) if advanced_indices_adjacent: advanced_index_bshape_start = advanced_indices[0] # start index of advanced_index_bshape in index_shape advanced_index_bshape_stop = advanced_index_bshape_start + len(advanced_index_bshape) for i, idx in enumerate(key): if _is_advanced_index(idx): k = advanced_index_bshape_stop # find the reshaped shape for indices[i] for dim_size in indices[i].shape[::-1]: k -= 1 idx_rshape[k] = dim_size else: if i < advanced_indices[0]: # slice is on the left side of advanced indices idx_rshape[i] = indices[i].shape[0] elif i > advanced_indices[-1]: # slice is on the right side of advanced indices idx_rshape[i-len(key)] = indices[i].shape[0] else: raise ValueError('basic index i=%d cannot be between advanced index i=%d and i=%d' % (i, advanced_indices[0], advanced_indices[-1])) # broadcast current index to the final shape broadcasted_indices.append(indices[i].reshape(tuple(idx_rshape)).broadcast_to(index_bshape)) # reset idx_rshape to ones for j, _ in enumerate(idx_rshape): idx_rshape[j] = 1 else: basic_index_offset = len(advanced_index_bshape) for i, idx in enumerate(key): if _is_advanced_index(idx): k = len(advanced_index_bshape) for dim_size in indices[i].shape[::-1]: k -= 1 idx_rshape[k] = dim_size else: idx_rshape[basic_index_offset] = indices[i].shape[0] basic_index_offset += 1 # broadcast current index to the final shape broadcasted_indices.append(indices[i].reshape(tuple(idx_rshape)).broadcast_to(index_bshape)) # reset idx_rshape to ones for j, _ in enumerate(idx_rshape): idx_rshape[j] = 1 indices = broadcasted_indices return op.stack(*indices)
Returns an index array for use in scatter_nd and gather_nd.
def newEntry(self, ident = "", seq = "", plus = "", qual = "") : """Appends an empty entry at the end of the CSV and returns it""" e = FastqEntry() self.data.append(e) return e
Appends an empty entry at the end of the CSV and returns it
def monotonic(values, mode="<", atol=1.e-8): """ Returns False if values are not monotonic (decreasing|increasing). mode is "<" for a decreasing sequence, ">" for an increasing sequence. Two numbers are considered equal if they differ less that atol. .. warning: Not very efficient for large data sets. >>> values = [1.2, 1.3, 1.4] >>> monotonic(values, mode="<") False >>> monotonic(values, mode=">") True """ if len(values) == 1: return True if mode == ">": for i in range(len(values)-1): v, vp = values[i], values[i+1] if abs(vp - v) > atol and vp <= v: return False elif mode == "<": for i in range(len(values)-1): v, vp = values[i], values[i+1] if abs(vp - v) > atol and vp >= v: return False else: raise ValueError("Wrong mode %s" % str(mode)) return True
Returns False if values are not monotonic (decreasing|increasing). mode is "<" for a decreasing sequence, ">" for an increasing sequence. Two numbers are considered equal if they differ less that atol. .. warning: Not very efficient for large data sets. >>> values = [1.2, 1.3, 1.4] >>> monotonic(values, mode="<") False >>> monotonic(values, mode=">") True
def run_query(self, collection_name, query): """ method runs query on a specified collection and return a list of filtered Job records """ cursor = self.ds.filter(collection_name, query) return [Job.from_json(document) for document in cursor]
method runs query on a specified collection and return a list of filtered Job records
def get_default_config_help(self): """ Returns the default collector help text """ config_help = super(UsersCollector, self).get_default_config_help() config_help.update({ }) return config_help
Returns the default collector help text
def noperiodic(r_array, periodic, reference=None): '''Rearrange the array of coordinates *r_array* in a way that doesn't cross the periodic boundary. Parameters ---------- r_array : :class:`numpy.ndarray`, (Nx3) Array of 3D coordinates. periodic: :class:`numpy.ndarray`, (3) Periodic boundary dimensions. reference: ``None`` or :class:`numpy.ndarray` (3) The points will be moved to be in the periodic image centered on the reference. If None, the first point will be taken as a reference Returns ------- A (N, 3) array of coordinates, all in the same periodic image. Example ------- >>> coordinates = np.array([[0.1, 0.0, 0.0], [0.9, 0.0, 0.0]]) >>> periodic = np.array([1, 1, 1]) >>> noperiodic(coordinates, periodic) [[ 0.1, 0.0, 0.0], [-0.1, 0.0, 0.0]] ''' if reference is None: center = r_array[0] else: center = reference # Find the displacements dr = (center - r_array) drsign = np.sign(dr) # Move things when the displacement is more than half the box size tomove = np.abs(dr) >= periodic / 2.0 r_array[tomove] += (drsign * periodic)[tomove] return r_array
Rearrange the array of coordinates *r_array* in a way that doesn't cross the periodic boundary. Parameters ---------- r_array : :class:`numpy.ndarray`, (Nx3) Array of 3D coordinates. periodic: :class:`numpy.ndarray`, (3) Periodic boundary dimensions. reference: ``None`` or :class:`numpy.ndarray` (3) The points will be moved to be in the periodic image centered on the reference. If None, the first point will be taken as a reference Returns ------- A (N, 3) array of coordinates, all in the same periodic image. Example ------- >>> coordinates = np.array([[0.1, 0.0, 0.0], [0.9, 0.0, 0.0]]) >>> periodic = np.array([1, 1, 1]) >>> noperiodic(coordinates, periodic) [[ 0.1, 0.0, 0.0], [-0.1, 0.0, 0.0]]
def _linkFeature(self, feature): """ Link a feature with its parents. """ parentNames = feature.attributes.get("Parent") if parentNames is None: self.roots.add(feature) else: for parentName in parentNames: self._linkToParent(feature, parentName)
Link a feature with its parents.
def _contour(f, vertexlabels=None, contourfunc=None, **kwargs): '''Workhorse function for the above, where ``contourfunc`` is the contour plotting function to use for actual plotting.''' if contourfunc is None: contourfunc = plt.tricontour if vertexlabels is None: vertexlabels = ('1','2','3') x = np.linspace(0, 1, 100) y = np.linspace(0, np.sqrt(3.0)/2.0, 100) points2d = np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))]) points3d = barycentric(points2d) valid = (points3d.sum(axis=1) == 1.0) & ((0.0 <= points3d).all(axis=1)) points2d = points2d[np.where(valid),:][0] points3d = points3d[np.where(valid),:][0] z = f(points3d) contourfunc(points2d[:,0], points2d[:,1], z, **kwargs) _draw_axes(vertexlabels) return plt.gcf()
Workhorse function for the above, where ``contourfunc`` is the contour plotting function to use for actual plotting.
def handleOneNodeMsg(self, wrappedMsg): """ Validate and process one message from a node. :param wrappedMsg: Tuple of message and the name of the node that sent the message """ try: vmsg = self.validateNodeMsg(wrappedMsg) if vmsg: logger.trace("{} msg validated {}".format(self, wrappedMsg), extra={"tags": ["node-msg-validation"]}) self.unpackNodeMsg(*vmsg) else: logger.debug("{} invalidated msg {}".format(self, wrappedMsg), extra={"tags": ["node-msg-validation"]}) except SuspiciousNode as ex: self.reportSuspiciousNodeEx(ex) except Exception as ex: msg, frm = wrappedMsg self.discard(msg, ex, logger.info)
Validate and process one message from a node. :param wrappedMsg: Tuple of message and the name of the node that sent the message
def effective_FPS(self): """ Calculates the effective frames-per-second - this should largely correlate to the desired FPS supplied in the constructor, but no guarantees are given. :returns: The effective frame rate. :rtype: float """ if self.start_time is None: self.start_time = 0 elapsed = monotonic() - self.start_time return self.called / elapsed
Calculates the effective frames-per-second - this should largely correlate to the desired FPS supplied in the constructor, but no guarantees are given. :returns: The effective frame rate. :rtype: float
def _check_transition_target(self, transition): """Checks the validity of a transition target Checks whether the transition target is valid. :param rafcon.core.transition.Transition transition: The transition to be checked :return bool validity, str message: validity is True, when the transition is valid, False else. message gives more information especially if the transition is not valid """ to_state_id = transition.to_state to_outcome_id = transition.to_outcome if to_state_id == self.state_id: if to_outcome_id not in self.outcomes: return False, "to_outcome is not existing" else: if to_state_id not in self.states: return False, "to_state is not existing" if to_outcome_id is not None: return False, "to_outcome must be None as transition goes to child state" return True, "valid"
Checks the validity of a transition target Checks whether the transition target is valid. :param rafcon.core.transition.Transition transition: The transition to be checked :return bool validity, str message: validity is True, when the transition is valid, False else. message gives more information especially if the transition is not valid
def fit(self, X, y=None): """Fit FeatureSetSelector for feature selection Parameters ---------- X: array-like of shape (n_samples, n_features) The training input samples. y: array-like, shape (n_samples,) The target values (integers that correspond to classes in classification, real numbers in regression). Returns ------- self: object Returns a copy of the estimator """ subset_df = pd.read_csv(self.subset_list, header=0, index_col=0) if isinstance(self.sel_subset, int): self.sel_subset_name = subset_df.index[self.sel_subset] elif isinstance(self.sel_subset, str): self.sel_subset_name = self.sel_subset else: # list or tuple self.sel_subset_name = [] for s in self.sel_subset: if isinstance(s, int): self.sel_subset_name.append(subset_df.index[s]) else: self.sel_subset_name.append(s) sel_features = subset_df.loc[self.sel_subset_name, 'Features'] if not isinstance(sel_features, str): sel_features = ";".join(sel_features.tolist()) sel_uniq_features = set(sel_features.split(';')) if isinstance(X, pd.DataFrame): # use columns' names self.feature_names = list(X.columns.values) self.feat_list = sorted(list(set(sel_uniq_features).intersection(set(self.feature_names)))) self.feat_list_idx = [list(X.columns).index(feat_name) for feat_name in self.feat_list] elif isinstance(X, np.ndarray): # use index self.feature_names = list(range(X.shape[1])) sel_uniq_features = [int(val) for val in sel_uniq_features] self.feat_list = sorted(list(set(sel_uniq_features).intersection(set(self.feature_names)))) self.feat_list_idx = self.feat_list if not len(self.feat_list): raise ValueError('No feature is found on the subset list!') return self
Fit FeatureSetSelector for feature selection Parameters ---------- X: array-like of shape (n_samples, n_features) The training input samples. y: array-like, shape (n_samples,) The target values (integers that correspond to classes in classification, real numbers in regression). Returns ------- self: object Returns a copy of the estimator
def findOptimalResults(expName, suite, outFile): """ Go through every experiment in the specified folder. For each experiment, find the iteration with the best validation score, and return the metrics associated with that iteration. """ writer = csv.writer(outFile) headers = ["testAccuracy", "bgAccuracy", "maxTotalAccuracy", "experiment path"] writer.writerow(headers) info = [] print("\n================",expName,"=====================") try: # Retrieve the last totalCorrect from each experiment # Print them sorted from best to worst values, params = suite.get_values_fix_params( expName, 0, "testerror", "last") for p in params: expPath = p["name"] if not "results" in expPath: expPath = os.path.join("results", expPath) maxTestAccuracy, maxValidationAccuracy, maxBGAccuracy, maxIter, maxTotalAccuracy = bestScore(expPath, suite) row = [maxTestAccuracy, maxBGAccuracy, maxTotalAccuracy, expPath] info.append(row) writer.writerow(row) print(tabulate(info, headers=headers, tablefmt="grid")) except: print("Couldn't analyze experiment",expName)
Go through every experiment in the specified folder. For each experiment, find the iteration with the best validation score, and return the metrics associated with that iteration.
def readlink(self, path): """ Return the target of a symbolic link (shortcut). You can use L{symlink} to create these. The result may be either an absolute or relative pathname. @param path: path of the symbolic link file @type path: str @return: target path @rtype: str """ path = self._adjust_cwd(path) self._log(DEBUG, 'readlink(%r)' % path) t, msg = self._request(CMD_READLINK, path) if t != CMD_NAME: raise SFTPError('Expected name response') count = msg.get_int() if count == 0: return None if count != 1: raise SFTPError('Readlink returned %d results' % count) return _to_unicode(msg.get_string())
Return the target of a symbolic link (shortcut). You can use L{symlink} to create these. The result may be either an absolute or relative pathname. @param path: path of the symbolic link file @type path: str @return: target path @rtype: str
def load_variants(adapter, vcf_obj, case_obj, skip_case_id=False, gq_treshold=None, max_window=3000, variant_type='snv'): """Load variants for a family into the database. Args: adapter (loqusdb.plugins.Adapter): initialized plugin case_obj(Case): dict with case information nr_variants(int) skip_case_id (bool): whether to include the case id on variant level or not gq_treshold(int) max_window(int): Specify the max size for sv windows variant_type(str): 'sv' or 'snv' Returns: nr_inserted(int) """ if variant_type == 'snv': nr_variants = case_obj['nr_variants'] else: nr_variants = case_obj['nr_sv_variants'] nr_inserted = 0 case_id = case_obj['case_id'] if skip_case_id: case_id = None # Loop over the variants in the vcf with click.progressbar(vcf_obj, label="Inserting variants",length=nr_variants) as bar: variants = (build_variant(variant,case_obj,case_id, gq_treshold) for variant in bar) if variant_type == 'sv': for sv_variant in variants: if not sv_variant: continue adapter.add_structural_variant(variant=sv_variant, max_window=max_window) nr_inserted += 1 if variant_type == 'snv': nr_inserted = adapter.add_variants(variants) LOG.info("Inserted %s variants of type %s", nr_inserted, variant_type) return nr_inserted
Load variants for a family into the database. Args: adapter (loqusdb.plugins.Adapter): initialized plugin case_obj(Case): dict with case information nr_variants(int) skip_case_id (bool): whether to include the case id on variant level or not gq_treshold(int) max_window(int): Specify the max size for sv windows variant_type(str): 'sv' or 'snv' Returns: nr_inserted(int)
async def reseed_init(self, next_seed: str = None) -> str: """ Begin reseed operation: generate new key. Raise WalletState if wallet is closed. :param next_seed: incoming replacement seed (default random) :return: new verification key """ LOGGER.debug('Wallet.reseed_init >>> next_seed: [SEED]') if not self.handle: LOGGER.debug('Wallet.reseed_init <!< Wallet %s is closed', self.name) raise WalletState('Wallet {} is closed'.format(self.name)) rv = await did.replace_keys_start(self.handle, self.did, json.dumps({'seed': next_seed} if next_seed else {})) LOGGER.debug('Wallet.reseed_init <<< %s', rv) return rv
Begin reseed operation: generate new key. Raise WalletState if wallet is closed. :param next_seed: incoming replacement seed (default random) :return: new verification key
def tag(self, name, formatter=None): """Return an instance of Tag. Args: name (str): The value for this tag. formatter (method, optional): A method that takes a tag value and returns a formatted tag. Returns: obj: An instance of Tag. """ tag = Tag(name, formatter) for tag_data in self._tags: if tag_data.name == name: tag = tag_data break else: self._tags.append(tag) return tag
Return an instance of Tag. Args: name (str): The value for this tag. formatter (method, optional): A method that takes a tag value and returns a formatted tag. Returns: obj: An instance of Tag.
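The find-or-create logic above (a for/else loop that appends only when no existing tag has the same name) can be exercised on its own. Below is a minimal sketch of that pattern; the Tag stand-in and the TagGroup container are invented here for illustration and are not the library's actual classes.

class Tag:
    """Hypothetical stand-in carrying only what the pattern needs."""
    def __init__(self, name, formatter=None):
        self.name = name
        self.formatter = formatter


class TagGroup:
    def __init__(self):
        self._tags = []

    def tag(self, name, formatter=None):
        # Reuse an existing Tag with the same name, otherwise cache the new one.
        tag = Tag(name, formatter)
        for tag_data in self._tags:
            if tag_data.name == name:
                tag = tag_data
                break
        else:
            self._tags.append(tag)
        return tag


group = TagGroup()
a = group.tag("alpha")
b = group.tag("alpha")
assert a is b                 # the second call returns the cached instance
assert len(group._tags) == 1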
def validate_bool_kwarg(value, arg_name): """ Ensures that argument passed in arg_name is of type bool. """ if not (is_bool(value) or value is None): raise ValueError('For argument "{arg}" expected type bool, received ' 'type {typ}.'.format(arg=arg_name, typ=type(value).__name__)) return value
Ensures that argument passed in arg_name is of type bool.
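The contract is easiest to see from a couple of calls. The sketch below swaps the library's is_bool helper (not shown here) for a plain isinstance check, so it is an approximation rather than the original behaviour.

def check_bool_kwarg(value, arg_name):
    # Accept True, False, or None; reject everything else with a clear message.
    if not (isinstance(value, bool) or value is None):
        raise ValueError('For argument "{arg}" expected type bool, received '
                         'type {typ}.'.format(arg=arg_name,
                                              typ=type(value).__name__))
    return value


assert check_bool_kwarg(True, "inplace") is True
assert check_bool_kwarg(None, "inplace") is None
try:
    check_bool_kwarg("yes", "inplace")
except ValueError as err:
    print(err)  # For argument "inplace" expected type bool, received type str.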
def _can_construct_from_str(strict_mode: bool, from_type: Type, to_type: Type) -> bool: """ Returns True if the provided types are valid for constructor_with_str_arg conversion. Explicitly declares that we are not able to convert primitive types (they already have their own converters) :param strict_mode: :param from_type: :param to_type: :return: """ return to_type not in {int, float, bool}
Returns True if the provided types are valid for constructor_with_str_arg conversion. Explicitly declares that we are not able to convert primitive types (they already have their own converters) :param strict_mode: :param from_type: :param to_type: :return:
def is_used(self, regs, i, top=None): """ Checks whether any of the given regs are required from the given point to the end or not. """ if i < 0: i = 0 if self.lock: return True regs = list(regs) # make a copy if top is None: top = len(self) else: top -= 1 for ii in range(i, top): for r in self.mem[ii].requires: if r in regs: return True for r in self.mem[ii].destroys: if r in regs: regs.remove(r) if not regs: return False self.lock = True result = self.goes_requires(regs) self.lock = False return result
Checks whether any of the given regs are required from the given point to the end or not.
async def wait_done(self) -> int: """Coroutine to wait for subprocess run completion. Returns: The exit code of the subprocess. """ await self._done_running_evt.wait() if self._exit_code is None: raise SublemonLifetimeError( 'Subprocess exited abnormally with `None` exit code') return self._exit_code
Coroutine to wait for subprocess run completion. Returns: The exit code of the subprocess.
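The coroutine simply parks on an asyncio.Event until whatever monitors the subprocess records an exit code. A self-contained sketch of that handshake is below; the Runner class and its attribute names are made up for illustration, and SublemonLifetimeError is replaced with a plain RuntimeError.

import asyncio


class Runner:
    def __init__(self):
        self._done_running_evt = asyncio.Event()
        self._exit_code = None

    def _finish(self, code):
        # Called by whatever watches the subprocess when it terminates.
        self._exit_code = code
        self._done_running_evt.set()

    async def wait_done(self) -> int:
        await self._done_running_evt.wait()
        if self._exit_code is None:
            raise RuntimeError("subprocess exited with no exit code")
        return self._exit_code


async def main():
    runner = Runner()
    # Simulate the subprocess finishing 0.1 s from now with exit code 0.
    asyncio.get_running_loop().call_later(0.1, runner._finish, 0)
    print(await runner.wait_done())  # 0


asyncio.run(main())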
def _refresh_multi_axis(self): """ If linked axes are used, set them up and link them """ d = self.declaration #: Create a separate viewbox self.viewbox = pg.ViewBox() #: If this is the first nested plot, use the parent right axis _plots = [c for c in self.parent().children() if isinstance(c,AbstractQtPlotItem)] i = _plots.index(self) if i==0: self.axis = self.widget.getAxis('right') self.widget.showAxis('right') else: self.axis = pg.AxisItem('right') self.axis.setZValue(-10000) #: Add new axis to scene self.widget.layout.addItem(self.axis,2,i+2) #: Link x axis to the parent axis self.viewbox.setXLink(self.widget.vb) #: Link y axis to the view self.axis.linkToView(self.viewbox) #: Set axis label self.axis.setLabel(d.label_right) #: Add Viewbox to parent scene self.parent().parent_widget().scene().addItem(self.viewbox)
If linked axes are used, set them up and link them
def moving_average(iterable, n): """ From Python collections module documentation moving_average([40, 30, 50, 46, 39, 44]) --> 40.0 42.0 45.0 43.0 """ it = iter(iterable) d = collections.deque(itertools.islice(it, n - 1)) d.appendleft(0) s = sum(d) for elem in it: s += elem - d.popleft() d.append(elem) yield s / float(n)
From Python collections module documentation moving_average([40, 30, 50, 46, 39, 44]) --> 40.0 42.0 45.0 43.0
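Because moving_average is a generator, it has to be consumed to see the values quoted in the docstring. The snippet below repeats the recipe together with its imports so it runs standalone; the numbers reproduce the documented example.

import collections
import itertools


def moving_average(iterable, n):
    it = iter(iterable)
    d = collections.deque(itertools.islice(it, n - 1))
    d.appendleft(0)
    s = sum(d)
    for elem in it:
        s += elem - d.popleft()
        d.append(elem)
        yield s / float(n)


print(list(moving_average([40, 30, 50, 46, 39, 44], 3)))
# [40.0, 42.0, 45.0, 43.0]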
def from_unit_cube(self, x): """ Used by multinest :param x: 0 < x < 1 :param lower_bound: :param upper_bound: :return: """ mu = self.mu.value sigma = self.sigma.value sqrt_two = 1.414213562 if x < 1e-16 or (1 - x) < 1e-16: res = -1e32 else: res = mu + sigma * sqrt_two * erfcinv(2 * (1 - x)) return res
Used by multinest :param x: 0 < x < 1 :param lower_bound: :param upper_bound: :return:
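The mapping above is the inverse CDF of a Gaussian prior, which is what MultiNest expects when it hands a point on the unit cube to the model. A standalone sketch of the same transform follows, using scipy's erfcinv; mu and sigma are example values, and the clamping near 0 and 1 mirrors the guard in the method.

from scipy.special import erfcinv


def gaussian_from_unit_cube(x, mu=0.0, sigma=1.0):
    # Inverse CDF of a normal distribution; x near 0 or 1 would map to
    # +/- infinity, so those cases are clamped like in the method above.
    if x < 1e-16 or (1 - x) < 1e-16:
        return -1e32
    return mu + sigma * 2 ** 0.5 * erfcinv(2 * (1 - x))


print(gaussian_from_unit_cube(0.5))                    # ~0.0, the median
print(gaussian_from_unit_cube(0.8413, mu=1, sigma=2))  # ~3.0, one sigma above mu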
def init_with_context(self, context): """ Initializes the status list. """ super(CacheStatusGroup, self).init_with_context(context) if 'dashboardmods' in settings.INSTALLED_APPS: import dashboardmods memcache_mods = dashboardmods.get_memcache_dash_modules() try: varnish_mods = dashboardmods.get_varnish_dash_modules() except (socket.error, KeyError) as e: # dashboardmods 2.2 throws KeyError for 'cache_misses' when the Varnish cache is empty. # Socket errors are also ignored, to work similar to the memcache stats. logger.exception("Unable to request Varnish stats: {0}".format(str(e))) varnish_mods = [] except ImportError: varnish_mods = [] self.children = memcache_mods + varnish_mods
Initializes the status list.
def add_resource_types(resource_i, types): """ Save a reference to the types used for this resource. @returns a list of type_ids representing the type ids on the resource. """ if types is None: return [] existing_type_ids = [] if resource_i.types: for t in resource_i.types: existing_type_ids.append(t.type_id) new_type_ids = [] for templatetype in types: if templatetype.id in existing_type_ids: continue rt_i = ResourceType() rt_i.type_id = templatetype.id rt_i.ref_key = resource_i.ref_key if resource_i.ref_key == 'NODE': rt_i.node_id = resource_i.id elif resource_i.ref_key == 'LINK': rt_i.link_id = resource_i.id elif resource_i.ref_key == 'GROUP': rt_i.group_id = resource_i.id resource_i.types.append(rt_i) new_type_ids.append(templatetype.id) return new_type_ids
Save a reference to the types used for this resource. @returns a list of type_ids representing the type ids on the resource.
def _get_stmt_by_group(self, stmt_type, stmts_this_type, eh): """Group Statements of `stmt_type` by their hierarchical relations.""" # Dict of stmt group key tuples, indexed by their first Agent stmt_by_first = collections.defaultdict(lambda: []) # Dict of stmt group key tuples, indexed by their second Agent stmt_by_second = collections.defaultdict(lambda: []) # Dict of statements with None first, with second Agent as keys none_first = collections.defaultdict(lambda: []) # Dict of statements with None second, with first Agent as keys none_second = collections.defaultdict(lambda: []) # The dict of all statement groups, with tuples of components # or entity_matches_keys as keys stmt_by_group = collections.defaultdict(lambda: []) # Here we group Statements according to the hierarchy graph # components that their agents are part of for stmt_tuple in stmts_this_type: _, stmt = stmt_tuple entities = self._get_entities(stmt, stmt_type, eh) # At this point we have an entity list # If we're dealing with Complexes, sort the entities and use # as dict key if stmt_type == Complex: # There shouldn't be any statements of the type # e.g., Complex([Foo, None, Bar]) assert None not in entities assert len(entities) > 0 entities.sort() key = tuple(entities) if stmt_tuple not in stmt_by_group[key]: stmt_by_group[key].append(stmt_tuple) elif stmt_type == Conversion: assert len(entities) > 0 key = (entities[0], tuple(sorted(entities[1:len(stmt.obj_from)+1])), tuple(sorted(entities[-len(stmt.obj_to):]))) if stmt_tuple not in stmt_by_group[key]: stmt_by_group[key].append(stmt_tuple) # Now look at all other statement types # All other statements will have one or two entities elif len(entities) == 1: # If only one entity, we only need the one key # It should not be None! assert None not in entities key = tuple(entities) if stmt_tuple not in stmt_by_group[key]: stmt_by_group[key].append(stmt_tuple) else: # Make sure we only have two entities, and they are not both # None key = tuple(entities) assert len(key) == 2 assert key != (None, None) # First agent is None; add in the statements, indexed by # 2nd if key[0] is None and stmt_tuple not in none_first[key[1]]: none_first[key[1]].append(stmt_tuple) # Second agent is None; add in the statements, indexed by # 1st elif key[1] is None and stmt_tuple not in none_second[key[0]]: none_second[key[0]].append(stmt_tuple) # Neither entity is None! elif None not in key: if stmt_tuple not in stmt_by_group[key]: stmt_by_group[key].append(stmt_tuple) if key not in stmt_by_first[key[0]]: stmt_by_first[key[0]].append(key) if key not in stmt_by_second[key[1]]: stmt_by_second[key[1]].append(key) # When we've gotten here, we should have stmt_by_group entries, and # we may or may not have stmt_by_first/second dicts filled out # (depending on the statement type). if none_first: # Get the keys associated with stmts having a None first # argument for second_arg, stmts in none_first.items(): # Look for any statements with this second arg second_arg_keys = stmt_by_second[second_arg] # If there are no more specific statements matching this # set of statements with a None first arg, then the # statements with the None first arg deserve to be in # their own group. 
if not second_arg_keys: stmt_by_group[(None, second_arg)] = stmts # On the other hand, if there are statements with a matching # second arg component, we need to add the None first # statements to all groups with the matching second arg for second_arg_key in second_arg_keys: stmt_by_group[second_arg_key] += stmts # Now do the corresponding steps for the statements with None as the # second argument: if none_second: for first_arg, stmts in none_second.items(): # Look for any statements with this first arg first_arg_keys = stmt_by_first[first_arg] # If there are no more specific statements matching this # set of statements with a None second arg, then the # statements with the None second arg deserve to be in # their own group. if not first_arg_keys: stmt_by_group[(first_arg, None)] = stmts # On the other hand, if there are statements with a matching # first arg component, we need to add the None second # statements to all groups with the matching first arg for first_arg_key in first_arg_keys: stmt_by_group[first_arg_key] += stmts return stmt_by_group
Group Statements of `stmt_type` by their hierarchical relations.
def _gen_keep_files(name, require, walk_d=None): ''' Generate the list of files that need to be kept when a dir based function like directory or recurse has a clean. ''' def _is_child(path, directory): ''' Check whether ``path`` is child of ``directory`` ''' path = os.path.abspath(path) directory = os.path.abspath(directory) relative = os.path.relpath(path, directory) return not relative.startswith(os.pardir) def _add_current_path(path): _ret = set() if os.path.isdir(path): dirs, files = walk_d.get(path, ((), ())) _ret.add(path) for _name in files: _ret.add(os.path.join(path, _name)) for _name in dirs: _ret.add(os.path.join(path, _name)) return _ret def _process_by_walk_d(name, ret): if os.path.isdir(name): walk_ret.update(_add_current_path(name)) dirs, _ = walk_d.get(name, ((), ())) for _d in dirs: p = os.path.join(name, _d) walk_ret.update(_add_current_path(p)) _process_by_walk_d(p, ret) def _process(name): ret = set() if os.path.isdir(name): for root, dirs, files in salt.utils.path.os_walk(name): ret.add(name) for name in files: ret.add(os.path.join(root, name)) for name in dirs: ret.add(os.path.join(root, name)) return ret keep = set() if isinstance(require, list): required_files = [comp for comp in require if 'file' in comp] for comp in required_files: for low in __lowstate__: # A requirement should match either the ID and the name of # another state. if low['name'] == comp['file'] or low['__id__'] == comp['file']: fn = low['name'] fun = low['fun'] if os.path.isdir(fn): if _is_child(fn, name): if fun == 'recurse': fkeep = _gen_recurse_managed_files(**low)[3] log.debug('Keep from %s: %s', fn, fkeep) keep.update(fkeep) elif walk_d: walk_ret = set() _process_by_walk_d(fn, walk_ret) keep.update(walk_ret) else: keep.update(_process(fn)) else: keep.add(fn) log.debug('Files to keep from required states: %s', list(keep)) return list(keep)
Generate the list of files that need to be kept when a directory-based function like directory or recurse runs a clean.
def _sample_item(self, **kwargs): """Sample an item from the pool according to the instrumental distribution """ t = self.t_ if 'fixed_stratum' in kwargs: stratum_idx = kwargs['fixed_stratum'] else: stratum_idx = None if stratum_idx is not None: # Sample in given stratum loc = self.strata._sample_in_stratum(stratum_idx, replace=False) # Record instrumental distribution if self.record_inst_hist: self.inst_pmf_[stratum_idx,t] = 1 else: # Choose stratum based on instrumental distribution self._calc_inst_pmf() if self.record_inst_hist: inst_pmf = self.inst_pmf_[:,t] else: inst_pmf = self.inst_pmf_ loc, stratum_idx = self.strata.sample(pmf = inst_pmf, replace=False) return loc, 1, {'stratum': stratum_idx}
Sample an item from the pool according to the instrumental distribution
def uyirmei_constructed(mei_idx, uyir_idx): """ construct uyirmei letter given mei index and uyir index """ idx, idy = mei_idx, uyir_idx assert ( idy >= 0 and idy < uyir_len() ) assert ( idx >= 0 and idx < 6+mei_len() ) return grantha_agaram_letters[mei_idx]+accent_symbols[uyir_idx]
construct uyirmei letter given mei index and uyir index
def _delete_vlan_profile(self, handle, vlan_id, ucsm_ip): """Deletes VLAN Profile from UCS Manager.""" vlan_name = self.make_vlan_name(vlan_id) vlan_profile_dest = (const.VLAN_PATH + const.VLAN_PROFILE_PATH_PREFIX + vlan_name) try: handle.StartTransaction() obj = handle.GetManagedObject( None, self.ucsmsdk.FabricVlan.ClassId(), {self.ucsmsdk.FabricVlan.DN: vlan_profile_dest}) if obj: handle.RemoveManagedObject(obj) handle.CompleteTransaction() except Exception as e: # Raise a Neutron exception. Include a description of # the original exception. raise cexc.UcsmConfigFailed(config=vlan_id, ucsm_ip=ucsm_ip, exc=e)
Deletes VLAN Profile from UCS Manager.
def tail_messages(self, topic="", passive=False, **kw): """ Subscribe to messages published on the sockets listed in :ref:`conf-endpoints`. Args: topic (six.text_type): The topic to subscribe to. The default is to subscribe to all topics. passive (bool): If ``True``, bind to the :ref:`conf-endpoints` sockets instead of connecting to them. Defaults to ``False``. **kw: Additional keyword arguments. Currently none are used. Yields: tuple: A 4-tuple in the form (name, endpoint, topic, message). """ if not self.c.get('zmq_enabled', True): raise ValueError("fedmsg.tail_messages() is only available for " "zeromq. Use the hub-consumer approach for " "STOMP or AMQP support.") poller, subs = self._create_poller(topic=topic, passive=False, **kw) try: for msg in self._poll(poller, subs): yield msg finally: self._close_subs(subs)
Subscribe to messages published on the sockets listed in :ref:`conf-endpoints`. Args: topic (six.text_type): The topic to subscribe to. The default is to subscribe to all topics. passive (bool): If ``True``, bind to the :ref:`conf-endpoints` sockets instead of connecting to them. Defaults to ``False``. **kw: Additional keyword arguments. Currently none are used. Yields: tuple: A 4-tuple in the form (name, endpoint, topic, message).
def _jws_header(keyid, algorithm): """Produce a base64-encoded JWS header.""" data = { 'typ': 'JWT', 'alg': algorithm.name, # 'kid' is used to indicate the public part of the key # used during signing. 'kid': keyid } datajson = json.dumps(data, sort_keys=True).encode('utf8') return base64url_encode(datajson)
Produce a base64-encoded JWS header.
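The header is just the canonical JSON of typ/alg/kid pushed through base64url. The sketch below uses only the standard library; it assumes base64url_encode behaves like URL-safe base64 with the '=' padding stripped (the usual JWS convention), and the key id and algorithm name are made-up example values.

import base64
import json


def jws_header(keyid, algorithm_name):
    data = {'typ': 'JWT', 'alg': algorithm_name, 'kid': keyid}
    datajson = json.dumps(data, sort_keys=True).encode('utf8')
    # URL-safe base64 without padding, per RFC 7515.
    return base64.urlsafe_b64encode(datajson).rstrip(b'=')


print(jws_header('key-1', 'ES256'))  # b'eyJhbGciOiAi...'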
def energy_coefficients(m1, m2, s1z=0, s2z=0, phase_order=-1, spin_order=-1): """ Return the energy coefficients. This assumes that the system has aligned spins only. """ implemented_phase_order = 7 implemented_spin_order = 7 if phase_order > implemented_phase_order: raise ValueError("pN coeffiecients of that order have not been implemented") elif phase_order == -1: phase_order = implemented_phase_order if spin_order > implemented_spin_order: raise ValueError("pN coeffiecients of that order have not been implemented") elif spin_order == -1: spin_order = implemented_spin_order qmdef1 = 1.0 qmdef2 = 1.0 M = m1 + m2 dm = (m1-m2)/M m1M = m1 / M m2M = m2 / M s1z = s1z * m1M * m1M s2z = s2z * m2M * m2M _, eta = mass1_mass2_to_mchirp_eta(m1, m2) ecof = numpy.zeros(phase_order+1) # Orbital terms if phase_order >= 0: ecof[0] = 1.0 if phase_order >= 1: ecof[1] = 0 if phase_order >= 2: ecof[2] = -(1.0/12.0) * (9.0 + eta) if phase_order >= 3: ecof[3] = 0 if phase_order >= 4: ecof[4] = (-81.0 + 57.0*eta - eta*eta) / 24.0 if phase_order >= 5: ecof[5] = 0 if phase_order >= 6: ecof[6] = - 675.0/64.0 + ( 34445.0/576.0 \ - 205.0/96.0 * lal.PI * lal.PI ) * eta \ - (155.0/96.0) *eta * eta - 35.0/5184.0 * eta * eta # Spin terms ESO15s1 = 8.0/3.0 + 2.0*m2/m1 ESO15s2 = 8.0/3.0 + 2.0*m1/m2 ESS2 = 1.0 / eta EQM2s1 = qmdef1/2.0/m1M/m1M EQM2s1L = -qmdef1*3.0/2.0/m1M/m1M #EQM2s2 = qmdef2/2.0/m2M/m2M EQM2s2L = -qmdef2*3.0/2.0/m2M/m2M ESO25s1 = 11.0 - 61.0*eta/9.0 + (dm/m1M) * (-3.0 + 10.*eta/3.0) ESO25s2 = 11.0 - 61.0*eta/9.0 + (dm/m2M) * (3.0 - 10.*eta/3.0) ESO35s1 = 135.0/4.0 - 367.0*eta/4.0 + 29.0*eta*eta/12.0 + (dm/m1M) * (-27.0/4.0 + 39.0*eta - 5.0*eta*eta/4.0) ESO35s2 = 135.0/4.0 - 367.0*eta/4.0 + 29.0*eta*eta/12.0 - (dm/m2M) * (-27.0/4.0 + 39.0*eta - 5.0*eta*eta/4.0) if spin_order >=3: ecof[3] += ESO15s1 * s1z + ESO15s2 * s2z if spin_order >=4: ecof[4] += ESS2 * (s1z*s2z - 3.0*s1z*s2z) ecof[4] += EQM2s1*s1z*s1z + EQM2s1*s2z*s2z + EQM2s1L*s1z*s1z + EQM2s2L*s2z*s2z if spin_order >=5: ecof[5] = ESO25s1*s1z + ESO25s2*s2z if spin_order >=7: ecof[7] += ESO35s1*s1z + ESO35s2*s2z return ecof
Return the energy coefficients. This assumes that the system has aligned spins only.
def retrieve_data_directory(self): """ Retrieve the data directory. Look first into config_filename_global, then into config_filename_user. The latter takes precedence. """ args = self.args try: if args['datadirectory']: aux.ensure_dir(args['datadirectory']) return args['datadirectory'] except KeyError: pass config = configparser.ConfigParser() config.read([config_filename_global, self.config_filename_user]) section = config.default_section data_path = config.get(section, 'Data directory', fallback='~/.local/share/greg') data_path_expanded = os.path.expanduser(data_path) aux.ensure_dir(data_path_expanded) return data_path_expanded
Retrieve the data directory. Look first into config_filename_global, then into config_filename_user. The latter takes precedence.
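The lookup order (explicit argument first, then the user config overriding the global one) is mostly configparser plumbing. A compact standalone version of the same fallback chain is sketched below; the two config paths are invented stand-ins for config_filename_global and config_filename_user, which are defined elsewhere in the original module.

import configparser
import os


def resolve_data_dir(args,
                     global_cfg="/etc/greg/greg.conf",      # assumed path
                     user_cfg="~/.config/greg/greg.conf"):  # assumed path
    # 1. An explicit command-line value wins outright.
    if args.get("datadirectory"):
        return args["datadirectory"]
    # 2. Otherwise read both files; the user file, read last, overrides.
    config = configparser.ConfigParser()
    config.read([global_cfg, os.path.expanduser(user_cfg)])
    path = config.get(config.default_section, "Data directory",
                      fallback="~/.local/share/greg")
    return os.path.expanduser(path)


print(resolve_data_dir({}))                              # fallback location
print(resolve_data_dir({"datadirectory": "/tmp/greg"}))  # /tmp/greg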
def get_range(self): """ Get range """ if not self.page: return (1, self.last_blocks[self.coinid]) # Get start of the range start = self.page * self.limit # Get finish of the range end = (self.page + 1) * self.limit if start > self.last_blocks[self.coinid]: return (1,1) if end > self.last_blocks[self.coinid]: return (start, self.last_blocks[self.coinid]) return (start, end)
Get range
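The windowing above is easier to follow with the class stripped away. The helper below reproduces the same arithmetic over (page, limit, last_block); the argument names and the sample numbers are illustrative, not part of the original API.

def block_range(page, limit, last_block):
    # Page 0 (or None) means "everything up to the newest block".
    if not page:
        return (1, last_block)
    start = page * limit
    end = (page + 1) * limit
    if start > last_block:
        return (1, 1)
    if end > last_block:
        return (start, last_block)
    return (start, end)


print(block_range(0, 50, 1200))   # (1, 1200)
print(block_range(3, 50, 1200))   # (150, 200)
print(block_range(24, 50, 1200))  # (1200, 1200)
print(block_range(30, 50, 1200))  # (1, 1)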
def create_vault_ec2_client_configuration(self, access_key, secret_key, endpoint=None, mount_point='aws-ec2'): """POST /auth/<mount_point>/config/client Configure the credentials required to perform API calls to AWS as well as custom endpoints to talk to AWS APIs. The instance identity document fetched from the PKCS#7 signature will provide the EC2 instance ID. The credentials configured using this endpoint will be used to query the status of the instances via DescribeInstances API. If static credentials are not provided using this endpoint, then the credentials will be retrieved from the environment variables AWS_ACCESS_KEY, AWS_SECRET_KEY and AWS_REGION respectively. If the credentials are still not found and if the method is configured on an EC2 instance with metadata querying capabilities, the credentials are fetched automatically :param access_key: AWS Access key with permissions to query AWS APIs. The permissions required depend on the specific configurations. If using the iam auth method without inferencing, then no credentials are necessary. If using the ec2 auth method or using the iam auth method with inferencing, then these credentials need access to ec2:DescribeInstances. If additionally a bound_iam_role is specified, then these credentials also need access to iam:GetInstanceProfile. If, however, an alternate sts configuration is set for the target account, then the credentials must be permissioned to call sts:AssumeRole on the configured role, and that role must have the permissions described here. :type access_key: str|unicode :param secret_key: AWS Secret key with permissions to query AWS APIs. :type secret_key: str|unicode :param endpoint: URL to override the default generated endpoint for making AWS EC2 API calls. :type endpoint: str|unicode :param mount_point: The "path" the AWS auth backend was mounted on. Vault currently defaults to "aws". "aws-ec2" is the default argument for backwards comparability within this module. :type mount_point: str|unicode :return: The response of the request. :rtype: requests.Response """ params = { 'access_key': access_key, 'secret_key': secret_key } if endpoint is not None: params['endpoint'] = endpoint return self._adapter.post('/v1/auth/{0}/config/client'.format(mount_point), json=params)
POST /auth/<mount_point>/config/client Configure the credentials required to perform API calls to AWS as well as custom endpoints to talk to AWS APIs. The instance identity document fetched from the PKCS#7 signature will provide the EC2 instance ID. The credentials configured using this endpoint will be used to query the status of the instances via DescribeInstances API. If static credentials are not provided using this endpoint, then the credentials will be retrieved from the environment variables AWS_ACCESS_KEY, AWS_SECRET_KEY and AWS_REGION respectively. If the credentials are still not found and if the method is configured on an EC2 instance with metadata querying capabilities, the credentials are fetched automatically :param access_key: AWS Access key with permissions to query AWS APIs. The permissions required depend on the specific configurations. If using the iam auth method without inferencing, then no credentials are necessary. If using the ec2 auth method or using the iam auth method with inferencing, then these credentials need access to ec2:DescribeInstances. If additionally a bound_iam_role is specified, then these credentials also need access to iam:GetInstanceProfile. If, however, an alternate sts configuration is set for the target account, then the credentials must be permissioned to call sts:AssumeRole on the configured role, and that role must have the permissions described here. :type access_key: str|unicode :param secret_key: AWS Secret key with permissions to query AWS APIs. :type secret_key: str|unicode :param endpoint: URL to override the default generated endpoint for making AWS EC2 API calls. :type endpoint: str|unicode :param mount_point: The "path" the AWS auth backend was mounted on. Vault currently defaults to "aws". "aws-ec2" is the default argument for backwards compatibility within this module. :type mount_point: str|unicode :return: The response of the request. :rtype: requests.Response
def parent(self, resource): """Set parent resource :param resource: parent resource :type resource: Resource :raises ResourceNotFound: resource not found on the API """ resource.check() self['parent_type'] = resource.type self['parent_uuid'] = resource.uuid
Set parent resource :param resource: parent resource :type resource: Resource :raises ResourceNotFound: resource not found on the API
def _get_view_result(view, raw_result, **kwargs): """ Get view results helper. """ if raw_result: return view(**kwargs) if kwargs: return Result(view, **kwargs) return view.result
Get view results helper.
def set_header(self, name, format, *args): """ Set node header; these are provided to other nodes during discovery and come in each ENTER message. """ return lib.zyre_set_header(self._as_parameter_, name, format, *args)
Set node header; these are provided to other nodes during discovery and come in each ENTER message.
def scatter2d(data, **kwargs): """Create a 2D scatter plot Builds upon `matplotlib.pyplot.scatter` with nice defaults and handles categorical colors / legends better. Parameters ---------- data : array-like, shape=[n_samples, n_features] Input data. Only the first two components will be used. c : list-like or None, optional (default: None) Color vector. Can be a single color value (RGB, RGBA, or named matplotlib colors), an array of these of length n_samples, or a list of discrete or continuous values of any data type. If `c` is not a single or list of matplotlib colors, the values in `c` will be used to populate the legend / colorbar with colors from `cmap` cmap : `matplotlib` colormap, str, dict or None, optional (default: None) matplotlib colormap. If None, uses `tab20` for discrete data and `inferno` for continuous data. If a dictionary, expects one key for every unique value in `c`, where values are valid matplotlib colors (hsv, rbg, rgba, or named colors) s : float, optional (default: 1) Point size. discrete : bool or None, optional (default: None) If True, the legend is categorical. If False, the legend is a colorbar. If None, discreteness is detected automatically. Data containing non-numeric `c` is always discrete, and numeric data with 20 or less unique values is discrete. ax : `matplotlib.Axes` or None, optional (default: None) axis on which to plot. If None, an axis is created legend : bool, optional (default: True) States whether or not to create a legend. If data is continuous, the legend is a colorbar. figsize : tuple, optional (default: None) Tuple of floats for creation of new `matplotlib` figure. Only used if `ax` is None. xticks : True, False, or list-like (default: False) If True, keeps default x ticks. If False, removes x ticks. If a list, sets custom x ticks yticks : True, False, or list-like (default: False) If True, keeps default y ticks. If False, removes y ticks. If a list, sets custom y ticks zticks : True, False, or list-like (default: False) If True, keeps default z ticks. If False, removes z ticks. If a list, sets custom z ticks. Only used for 3D plots. xticklabels : True, False, or list-like (default: True) If True, keeps default x tick labels. If False, removes x tick labels. If a list, sets custom x tick labels yticklabels : True, False, or list-like (default: True) If True, keeps default y tick labels. If False, removes y tick labels. If a list, sets custom y tick labels zticklabels : True, False, or list-like (default: True) If True, keeps default z tick labels. If False, removes z tick labels. If a list, sets custom z tick labels. Only used for 3D plots. label_prefix : str or None (default: "PHATE") Prefix for all axis labels. Axes will be labelled `label_prefix`1, `label_prefix`2, etc. Can be overriden by setting `xlabel`, `ylabel`, and `zlabel`. xlabel : str or None (default : None) Label for the x axis. Overrides the automatic label given by label_prefix. If None and label_prefix is None, no label is set. ylabel : str or None (default : None) Label for the y axis. Overrides the automatic label given by label_prefix. If None and label_prefix is None, no label is set. zlabel : str or None (default : None) Label for the z axis. Overrides the automatic label given by label_prefix. If None and label_prefix is None, no label is set. Only used for 3D plots. title : str or None (default: None) axis title. If None, no title is set. 
legend_title : str (default: "") title for the colorbar of legend legend_loc : int or string or pair of floats, default: 'best' Matplotlib legend location. Only used for discrete data. See <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html> for details. filename : str or None (default: None) file to which the output is saved dpi : int or None, optional (default: None) The resolution in dots per inch. If None it will default to the value savefig.dpi in the matplotlibrc file. If 'figure' it will set the dpi to be the value of the figure. Only used if filename is not None. **plot_kwargs : keyword arguments Extra arguments passed to `matplotlib.pyplot.scatter`. Returns ------- ax : `matplotlib.Axes` axis on which plot was drawn Examples -------- >>> import phate >>> import matplotlib.pyplot as plt >>> ### >>> # Running PHATE >>> ### >>> tree_data, tree_clusters = phate.tree.gen_dla(n_dim=100, n_branch=20, ... branch_length=100) >>> tree_data.shape (2000, 100) >>> phate_operator = phate.PHATE(k=5, a=20, t=150) >>> tree_phate = phate_operator.fit_transform(tree_data) >>> tree_phate.shape (2000, 2) >>> ### >>> # Plotting using phate.plot >>> ### >>> phate.plot.scatter2d(tree_phate, c=tree_clusters) >>> # You can also pass the PHATE operator instead of data >>> phate.plot.scatter2d(phate_operator, c=tree_clusters) >>> phate.plot.scatter3d(phate_operator, c=tree_clusters) >>> ### >>> # Using a cmap dictionary >>> ### >>> import numpy as np >>> X = np.random.normal(0,1,[1000,2]) >>> c = np.random.choice(['a','b'], 1000, replace=True) >>> X[c=='a'] += 10 >>> phate.plot.scatter2d(X, c=c, cmap={'a' : [1,0,0,1], 'b' : 'xkcd:sky blue'}) """ warnings.warn("`phate.plot.scatter2d` is deprecated. " "Use `scprep.plot.scatter2d` instead.", FutureWarning) data = _get_plot_data(data, ndim=2) return scprep.plot.scatter2d(data, **kwargs)
Create a 2D scatter plot Builds upon `matplotlib.pyplot.scatter` with nice defaults and handles categorical colors / legends better. Parameters ---------- data : array-like, shape=[n_samples, n_features] Input data. Only the first two components will be used. c : list-like or None, optional (default: None) Color vector. Can be a single color value (RGB, RGBA, or named matplotlib colors), an array of these of length n_samples, or a list of discrete or continuous values of any data type. If `c` is not a single or list of matplotlib colors, the values in `c` will be used to populate the legend / colorbar with colors from `cmap` cmap : `matplotlib` colormap, str, dict or None, optional (default: None) matplotlib colormap. If None, uses `tab20` for discrete data and `inferno` for continuous data. If a dictionary, expects one key for every unique value in `c`, where values are valid matplotlib colors (hsv, rbg, rgba, or named colors) s : float, optional (default: 1) Point size. discrete : bool or None, optional (default: None) If True, the legend is categorical. If False, the legend is a colorbar. If None, discreteness is detected automatically. Data containing non-numeric `c` is always discrete, and numeric data with 20 or less unique values is discrete. ax : `matplotlib.Axes` or None, optional (default: None) axis on which to plot. If None, an axis is created legend : bool, optional (default: True) States whether or not to create a legend. If data is continuous, the legend is a colorbar. figsize : tuple, optional (default: None) Tuple of floats for creation of new `matplotlib` figure. Only used if `ax` is None. xticks : True, False, or list-like (default: False) If True, keeps default x ticks. If False, removes x ticks. If a list, sets custom x ticks yticks : True, False, or list-like (default: False) If True, keeps default y ticks. If False, removes y ticks. If a list, sets custom y ticks zticks : True, False, or list-like (default: False) If True, keeps default z ticks. If False, removes z ticks. If a list, sets custom z ticks. Only used for 3D plots. xticklabels : True, False, or list-like (default: True) If True, keeps default x tick labels. If False, removes x tick labels. If a list, sets custom x tick labels yticklabels : True, False, or list-like (default: True) If True, keeps default y tick labels. If False, removes y tick labels. If a list, sets custom y tick labels zticklabels : True, False, or list-like (default: True) If True, keeps default z tick labels. If False, removes z tick labels. If a list, sets custom z tick labels. Only used for 3D plots. label_prefix : str or None (default: "PHATE") Prefix for all axis labels. Axes will be labelled `label_prefix`1, `label_prefix`2, etc. Can be overriden by setting `xlabel`, `ylabel`, and `zlabel`. xlabel : str or None (default : None) Label for the x axis. Overrides the automatic label given by label_prefix. If None and label_prefix is None, no label is set. ylabel : str or None (default : None) Label for the y axis. Overrides the automatic label given by label_prefix. If None and label_prefix is None, no label is set. zlabel : str or None (default : None) Label for the z axis. Overrides the automatic label given by label_prefix. If None and label_prefix is None, no label is set. Only used for 3D plots. title : str or None (default: None) axis title. If None, no title is set. legend_title : str (default: "") title for the colorbar of legend legend_loc : int or string or pair of floats, default: 'best' Matplotlib legend location. 
Only used for discrete data. See <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html> for details. filename : str or None (default: None) file to which the output is saved dpi : int or None, optional (default: None) The resolution in dots per inch. If None it will default to the value savefig.dpi in the matplotlibrc file. If 'figure' it will set the dpi to be the value of the figure. Only used if filename is not None. **plot_kwargs : keyword arguments Extra arguments passed to `matplotlib.pyplot.scatter`. Returns ------- ax : `matplotlib.Axes` axis on which plot was drawn Examples -------- >>> import phate >>> import matplotlib.pyplot as plt >>> ### >>> # Running PHATE >>> ### >>> tree_data, tree_clusters = phate.tree.gen_dla(n_dim=100, n_branch=20, ... branch_length=100) >>> tree_data.shape (2000, 100) >>> phate_operator = phate.PHATE(k=5, a=20, t=150) >>> tree_phate = phate_operator.fit_transform(tree_data) >>> tree_phate.shape (2000, 2) >>> ### >>> # Plotting using phate.plot >>> ### >>> phate.plot.scatter2d(tree_phate, c=tree_clusters) >>> # You can also pass the PHATE operator instead of data >>> phate.plot.scatter2d(phate_operator, c=tree_clusters) >>> phate.plot.scatter3d(phate_operator, c=tree_clusters) >>> ### >>> # Using a cmap dictionary >>> ### >>> import numpy as np >>> X = np.random.normal(0,1,[1000,2]) >>> c = np.random.choice(['a','b'], 1000, replace=True) >>> X[c=='a'] += 10 >>> phate.plot.scatter2d(X, c=c, cmap={'a' : [1,0,0,1], 'b' : 'xkcd:sky blue'})
def _read_waypoints_v110(self, file): '''read a version 110 waypoint''' comment = '' for line in file: if line.startswith('#'): comment = line[1:].lstrip() continue line = line.strip() if not line: continue a = line.split() if len(a) != 12: raise MAVWPError("invalid waypoint line with %u values" % len(a)) if mavutil.mavlink10(): fn = mavutil.mavlink.MAVLink_mission_item_message else: fn = mavutil.mavlink.MAVLink_waypoint_message w = fn(self.target_system, self.target_component, int(a[0]), # seq int(a[2]), # frame int(a[3]), # command int(a[1]), # current int(a[11]), # autocontinue float(a[4]), # param1, float(a[5]), # param2, float(a[6]), # param3 float(a[7]), # param4 float(a[8]), # x (latitude) float(a[9]), # y (longitude) float(a[10]) # z (altitude) ) if w.command == 0 and w.seq == 0 and self.count() == 0: # special handling for Mission Planner created home wp w.command = mavutil.mavlink.MAV_CMD_NAV_WAYPOINT self.add(w, comment) comment = ''
read a version 110 waypoint
def cartesian_to_barycentric_3D(tri, xy): ''' cartesian_to_barycentric_3D(tri,xy) is identical to cartesian_to_barycentric_2D(tri,xy) except it works on 3D data. Note that if tri is a 3 x 3 x n, a 3 x n x 3 or an n x 3 x 3 matrix, the first dimension must always be the triangle vertices and the second 3-sized dimension must be the (x,y,z) coordinates. ''' xy = np.asarray(xy) tri = np.asarray(tri) if len(xy.shape) == 1: return cartesian_to_barycentric_3D(np.transpose(np.asarray([tri]), (1,2,0)), np.asarray([xy]).T)[:,0] xy = xy if xy.shape[0] == 3 else xy.T if tri.shape[0] == 3: tri = tri if tri.shape[1] == 3 else np.transpose(tri, (0,2,1)) elif tri.shape[1] == 3: tri = tri.T if tri.shape[0] == 3 else np.transpose(tri, (1,2,0)) elif tri.shape[2] == 3: tri = np.transpose(tri, (2,1,0) if tri.shape[1] == 3 else (2,0,1)) if tri.shape[0] != 3 or tri.shape[1] != 3: raise ValueError('Triangle array did not have dimensions of sizes 3 and 3') if xy.shape[0] != 3: raise ValueError('coordinate matrix did not have a dimension of size 3') if tri.shape[2] != xy.shape[1]: raise ValueError('number of triangles and coordinates must match') # The algorithm here is borrowed from this stack-exchange post: # http://gamedev.stackexchange.com/questions/23743 # in which it is attributed to Christer Ericson's book Real-Time Collision Detection. v0 = tri[1] - tri[0] v1 = tri[2] - tri[0] v2 = xy - tri[0] d00 = np.sum(v0 * v0, axis=0) d01 = np.sum(v0 * v1, axis=0) d11 = np.sum(v1 * v1, axis=0) d20 = np.sum(v2 * v0, axis=0) d21 = np.sum(v2 * v1, axis=0) den = d00*d11 - d01*d01 zero = np.isclose(den, 0) unit = 1 - zero den += zero l2 = unit * (d11 * d20 - d01 * d21) / den l3 = unit * (d00 * d21 - d01 * d20) / den return np.asarray([1.0 - l2 - l3, l2])
cartesian_to_barycentric_3D(tri,xy) is identical to cartesian_to_barycentric_2D(tri,xy) except it works on 3D data. Note that if tri is a 3 x 3 x n, a 3 x n x 3 or an n x 3 x 3 matrix, the first dimension must always be the triangle vertices and the second 3-sized dimension must be the (x,y,z) coordinates.
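For a single triangle the Ericson formulation reduces to a handful of dot products. The sketch below computes all three barycentric weights for one point and checks that they reconstruct it; it only handles the single-triangle case and deliberately skips the batched axis handling of the full function.

import numpy as np


def barycentric_single(tri, p):
    # tri: (3, 3) array of triangle vertices; p: a point in the triangle's plane.
    a, b, c = np.asarray(tri, dtype=float)
    p = np.asarray(p, dtype=float)
    v0, v1, v2 = b - a, c - a, p - a
    d00, d01, d11 = v0 @ v0, v0 @ v1, v1 @ v1
    d20, d21 = v2 @ v0, v2 @ v1
    den = d00 * d11 - d01 * d01
    l2 = (d11 * d20 - d01 * d21) / den
    l3 = (d00 * d21 - d01 * d20) / den
    return np.array([1.0 - l2 - l3, l2, l3])


tri = [(0, 0, 0), (1, 0, 0), (0, 1, 0)]
w = barycentric_single(tri, (0.25, 0.25, 0.0))
print(w)                                 # [0.5  0.25 0.25]
print(w @ np.asarray(tri, dtype=float))  # recovers the original point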
def _found_barcode(self, record, sample, barcode=None): """Hook called when barcode is found""" assert record.id == self.current_record['sequence_name'] self.current_record['sample'] = sample
Hook called when barcode is found
def daterange(start, stop, step=1, inclusive=False): """In the spirit of :func:`range` and :func:`xrange`, the `daterange` generator that yields a sequence of :class:`~datetime.date` objects, starting at *start*, incrementing by *step*, until *stop* is reached. When *inclusive* is True, the final date may be *stop*, **if** *step* falls evenly on it. By default, *step* is one day. See details below for many more details. Args: start (datetime.date): The starting date The first value in the sequence. stop (datetime.date): The stopping date. By default not included in return. Can be `None` to yield an infinite sequence. step (int): The value to increment *start* by to reach *stop*. Can be an :class:`int` number of days, a :class:`datetime.timedelta`, or a :class:`tuple` of integers, `(year, month, day)`. Positive and negative *step* values are supported. inclusive (bool): Whether or not the *stop* date can be returned. *stop* is only returned when a *step* falls evenly on it. >>> christmas = date(year=2015, month=12, day=25) >>> boxing_day = date(year=2015, month=12, day=26) >>> new_year = date(year=2016, month=1, day=1) >>> for day in daterange(christmas, new_year): ... print(repr(day)) datetime.date(2015, 12, 25) datetime.date(2015, 12, 26) datetime.date(2015, 12, 27) datetime.date(2015, 12, 28) datetime.date(2015, 12, 29) datetime.date(2015, 12, 30) datetime.date(2015, 12, 31) >>> for day in daterange(christmas, boxing_day): ... print(repr(day)) datetime.date(2015, 12, 25) >>> for day in daterange(date(2017, 5, 1), date(2017, 8, 1), ... step=(0, 1, 0), inclusive=True): ... print(repr(day)) datetime.date(2017, 5, 1) datetime.date(2017, 6, 1) datetime.date(2017, 7, 1) datetime.date(2017, 8, 1) *Be careful when using stop=None, as this will yield an infinite sequence of dates.* """ if not isinstance(start, date): raise TypeError("start expected datetime.date instance") if stop and not isinstance(stop, date): raise TypeError("stop expected datetime.date instance or None") try: y_step, m_step, d_step = step except TypeError: y_step, m_step, d_step = 0, 0, step else: y_step, m_step = int(y_step), int(m_step) if isinstance(d_step, int): d_step = timedelta(days=int(d_step)) elif isinstance(d_step, timedelta): pass else: raise ValueError('step expected int, timedelta, or tuple' ' (year, month, day), not: %r' % step) if stop is None: finished = lambda t: False elif start < stop: finished = operator.gt if inclusive else operator.ge else: finished = operator.lt if inclusive else operator.le now = start while not finished(now, stop): yield now if y_step or m_step: m_y_step, cur_month = divmod(now.month + m_step, 12) now = now.replace(year=now.year + y_step + m_y_step, month=cur_month or 12) now = now + d_step return
In the spirit of :func:`range` and :func:`xrange`, the `daterange` generator that yields a sequence of :class:`~datetime.date` objects, starting at *start*, incrementing by *step*, until *stop* is reached. When *inclusive* is True, the final date may be *stop*, **if** *step* falls evenly on it. By default, *step* is one day. See details below for many more details. Args: start (datetime.date): The starting date The first value in the sequence. stop (datetime.date): The stopping date. By default not included in return. Can be `None` to yield an infinite sequence. step (int): The value to increment *start* by to reach *stop*. Can be an :class:`int` number of days, a :class:`datetime.timedelta`, or a :class:`tuple` of integers, `(year, month, day)`. Positive and negative *step* values are supported. inclusive (bool): Whether or not the *stop* date can be returned. *stop* is only returned when a *step* falls evenly on it. >>> christmas = date(year=2015, month=12, day=25) >>> boxing_day = date(year=2015, month=12, day=26) >>> new_year = date(year=2016, month=1, day=1) >>> for day in daterange(christmas, new_year): ... print(repr(day)) datetime.date(2015, 12, 25) datetime.date(2015, 12, 26) datetime.date(2015, 12, 27) datetime.date(2015, 12, 28) datetime.date(2015, 12, 29) datetime.date(2015, 12, 30) datetime.date(2015, 12, 31) >>> for day in daterange(christmas, boxing_day): ... print(repr(day)) datetime.date(2015, 12, 25) >>> for day in daterange(date(2017, 5, 1), date(2017, 8, 1), ... step=(0, 1, 0), inclusive=True): ... print(repr(day)) datetime.date(2017, 5, 1) datetime.date(2017, 6, 1) datetime.date(2017, 7, 1) datetime.date(2017, 8, 1) *Be careful when using stop=None, as this will yield an infinite sequence of dates.*
def _take_ownership(self): """Make the Python instance take ownership of the GIBaseInfo. i.e. unref if the python instance gets gc'ed. """ if self: ptr = cast(self.value, GIBaseInfo) _UnrefFinalizer.track(self, ptr) self.__owns = True
Make the Python instance take ownership of the GIBaseInfo. i.e. unref if the python instance gets gc'ed.
def just_load_srno(srno, prm_filename=None): """Simply load an dataset based on serial number (srno). This convenience function reads a dataset based on a serial number. This serial number (srno) must then be defined in your database. It is mainly used to check that things are set up correctly. Args: prm_filename: name of parameter file (optional). srno (int): serial number Example: >>> srno = 918 >>> just_load_srno(srno) srno: 918 read prms .... """ from cellpy import dbreader, filefinder print("just_load_srno: srno: %i" % srno) # ------------reading parameters-------------------------------------------- # print "just_load_srno: read prms" # prm = prmreader.read(prm_filename) # # print prm print("just_load_srno: making class and setting prms") d = CellpyData() # ------------reading db---------------------------------------------------- print() print("just_load_srno: starting to load reader") # reader = dbreader.reader(prm_filename) reader = dbreader.Reader() print("------ok------") run_name = reader.get_cell_name(srno) print("just_load_srno: run_name:") print(run_name) m = reader.get_mass(srno) print("just_load_srno: mass: %f" % m) print() # ------------loadcell------------------------------------------------------ print("just_load_srno: getting file_names") raw_files, cellpy_file = filefinder.search_for_files(run_name) print("raw_files:", raw_files) print("cellpy_file:", cellpy_file) print("just_load_srno: running loadcell") d.loadcell(raw_files, cellpy_file, mass=m) print("------ok------") # ------------do stuff------------------------------------------------------ print("just_load_srno: getting step_numbers for charge") v = d.get_step_numbers("charge") print(v) print() print("just_load_srno: finding C-rates") d.find_C_rates(v, silent=False) print() print("just_load_srno: OK") return True
Simply load a dataset based on serial number (srno). This convenience function reads a dataset based on a serial number. This serial number (srno) must then be defined in your database. It is mainly used to check that things are set up correctly. Args: prm_filename: name of parameter file (optional). srno (int): serial number Example: >>> srno = 918 >>> just_load_srno(srno) srno: 918 read prms ....
def validate(self, data): """Validate data. Raise NotValid error for invalid data.""" validated = self._validated(data) errors = [] for validator in self.additional_validators: if not validator(validated): errors.append( "%s invalidated by '%s'" % ( validated, _get_repr(validator))) if errors: raise NotValid(*errors) if self.default is UNSPECIFIED: return validated if self.null_values is not UNSPECIFIED\ and validated in self.null_values: return self.default if validated is None: return self.default return validated
Validate data. Raise NotValid error for invalid data.
def from_spcm(filepath, name=None, *, delimiter=",", parent=None, verbose=True) -> Data: """Create a ``Data`` object from a Becker & Hickl spcm file (ASCII-exported, ``.asc``). If provided, setup parameters are stored in the ``attrs`` dictionary of the ``Data`` object. See the `spcm`__ software hompage for more info. __ http://www.becker-hickl.com/software/spcm.htm Parameters ---------- filepath : path-like Path to SPC-xxx .asc file. Can be either a local or remote file (http/ftp). Can be compressed with gz/bz2, decompression based on file name. name : string (optional) Name to give to the created data object. If None, filename is used. Default is None. delimiter : string (optional) The string used to separate values. Default is ','. parent : WrightTools.Collection (optional) Collection to place new data object within. Default is None. verbose : boolean (optional) Toggle talkback. Default is True. Returns ------- WrightTools.data.Data object """ filestr = os.fspath(filepath) filepath = pathlib.Path(filepath) # check filepath if not ".asc" in filepath.suffixes: wt_exceptions.WrongFileTypeWarning.warn(filepath, ".asc") # parse name if not name: name = filepath.name.split(".")[0] # create headers dictionary headers = collections.OrderedDict() header_lines = 0 ds = np.DataSource(None) f = ds.open(filestr, "rt") while True: line = f.readline().strip() header_lines += 1 if len(line) == 0: break else: key, value = line.split(":", 1) if key.strip() == "Revision": headers["resolution"] = int(value.strip(" bits ADC")) else: headers[key.strip()] = value.strip() line = f.readline().strip() while "_BEGIN" in line: header_lines += 1 section = line.split("_BEGIN")[0] while True: line = f.readline().strip() header_lines += 1 if section + "_END" in line: break if section == "SYS_PARA": use_type = { "B": lambda b: int(b) == 1, "C": str, # e.g. #SP [SP_OVERFL,C,N] "F": float, "I": int, "L": int, # e.g. #DI [DI_MAXCNT,L,128] "S": str, "U": int, # unsigned int? } item = line[line.find("[") + 1 : line.find("]")].split(",") key = item[0] value = use_type[item[1]](item[2]) headers[key] = value else: splitted = line.split() value = splitted[-1][1:-1].split(",") key = " ".join(splitted[:-1]) headers[key] = value line = f.readline().strip() if "END" in line: header_lines += 1 break if "Date" in headers.keys() and "Time" in headers.keys(): # NOTE: reports created in local time, no-way to calculate absolute time created = " ".join([headers["Date"], headers["Time"]]) created = time.strptime(created, "%Y-%m-%d %H:%M:%S") created = timestamp.TimeStamp(time.mktime(created)).RFC3339 headers["created"] = created # initialize data object kwargs = {"name": name, "kind": "spcm", "source": filestr, **headers} if parent: data = parent.create_data(**kwargs) else: data = Data(**kwargs) # import data f.seek(0) arr = np.genfromtxt( f, skip_header=(header_lines + 1), skip_footer=1, delimiter=delimiter, unpack=True ) f.close() # construct data data.create_variable(name="time", values=arr[0], units="ns") data.create_channel(name="counts", values=arr[1]) data.transform("time") # finish if verbose: print("data created at {0}".format(data.fullpath)) print(" kind: {0}".format(data.kind)) print(" range: {0} to {1} (ns)".format(data.time[0], data.time[-1])) print(" size: {0}".format(data.size)) if "SP_COL_T" in data.attrs.keys(): print(" collection time: {0} sec".format(data.attrs["SP_COL_T"])) return data
Create a ``Data`` object from a Becker & Hickl spcm file (ASCII-exported, ``.asc``). If provided, setup parameters are stored in the ``attrs`` dictionary of the ``Data`` object. See the `spcm`__ software homepage for more info. __ http://www.becker-hickl.com/software/spcm.htm Parameters ---------- filepath : path-like Path to SPC-xxx .asc file. Can be either a local or remote file (http/ftp). Can be compressed with gz/bz2, decompression based on file name. name : string (optional) Name to give to the created data object. If None, filename is used. Default is None. delimiter : string (optional) The string used to separate values. Default is ','. parent : WrightTools.Collection (optional) Collection to place new data object within. Default is None. verbose : boolean (optional) Toggle talkback. Default is True. Returns ------- WrightTools.data.Data object
def sigma_to_pressure(sigma, psfc, ptop): r"""Calculate pressure from sigma values. Parameters ---------- sigma : ndarray The sigma levels to be converted to pressure levels. psfc : `pint.Quantity` The surface pressure value. ptop : `pint.Quantity` The pressure value at the top of the model domain. Returns ------- `pint.Quantity` The pressure values at the given sigma levels. Notes ----- Sigma definition adapted from [Philips1957]_. .. math:: p = \sigma * (p_{sfc} - p_{top}) + p_{top} * :math:`p` is pressure at a given `\sigma` level * :math:`\sigma` is non-dimensional, scaled pressure * :math:`p_{sfc}` is pressure at the surface or model floor * :math:`p_{top}` is pressure at the top of the model domain """ if np.any(sigma < 0) or np.any(sigma > 1): raise ValueError('Sigma values should be bounded by 0 and 1') if psfc.magnitude < 0 or ptop.magnitude < 0: raise ValueError('Pressure input should be non-negative') return sigma * (psfc - ptop) + ptop
r"""Calculate pressure from sigma values. Parameters ---------- sigma : ndarray The sigma levels to be converted to pressure levels. psfc : `pint.Quantity` The surface pressure value. ptop : `pint.Quantity` The pressure value at the top of the model domain. Returns ------- `pint.Quantity` The pressure values at the given sigma levels. Notes ----- Sigma definition adapted from [Philips1957]_. .. math:: p = \sigma * (p_{sfc} - p_{top}) + p_{top} * :math:`p` is pressure at a given `\sigma` level * :math:`\sigma` is non-dimensional, scaled pressure * :math:`p_{sfc}` is pressure at the surface or model floor * :math:`p_{top}` is pressure at the top of the model domain
def stackplot_t(tarray, seconds=None, start_time=None, ylabels=None): """ will plot a stack of traces one above the other assuming tarray.shape = numSamples, numRows """ data = tarray numSamples, numRows = tarray.shape # data = np.random.randn(numSamples,numRows) # test data # data.shape = numSamples, numRows if seconds: t = seconds * np.arange(numSamples, dtype=float)/numSamples # import pdb # pdb.set_trace() if start_time: t = t+start_time xlm = (start_time, start_time+seconds) else: xlm = (0,seconds) else: t = np.arange(numSamples, dtype=float) xlm = (0,numSamples) ticklocs = [] ax = plt.subplot(111) plt.xlim(*xlm) # xticks(np.linspace(xlm, 10)) dmin = data.min() dmax = data.max() dr = (dmax - dmin)*0.7 # Crowd them a bit. y0 = dmin y1 = (numRows-1) * dr + dmax plt.ylim(y0, y1) segs = [] for i in range(numRows): segs.append(np.hstack((t[:,np.newaxis], data[:,i,np.newaxis]))) # print "segs[-1].shape:", segs[-1].shape ticklocs.append(i*dr) offsets = np.zeros((numRows,2), dtype=float) offsets[:,1] = ticklocs lines = LineCollection(segs, offsets=offsets, transOffset=None, ) ax.add_collection(lines) # set the yticks to use axes coords on the y axis ax.set_yticks(ticklocs) # ax.set_yticklabels(['PG3', 'PG5', 'PG7', 'PG9']) # if not plt.ylabels: plt.ylabels = ["%d" % ii for ii in range(numRows)] ax.set_yticklabels(ylabels) plt.xlabel('time (s)')
Will plot a stack of traces one above the other, assuming tarray.shape == (numSamples, numRows)
def find_files(directory=".", ext=None, name=None, match_case=False, disable_glob=False, depth=None, abspath=False, enable_scandir=False): """ Walk through a file directory and return an iterator of files that match requirements. Will autodetect if name has glob as magic characters. Note: For the example below, you can use find_files_list to return as a list, this is simply an easy way to show the output. .. code:: python list(reusables.find_files(name="ex", match_case=True)) # ['C:\\example.pdf', # 'C:\\My_exam_score.txt'] list(reusables.find_files(name="*free*")) # ['C:\\my_stuff\\Freedom_fight.pdf'] list(reusables.find_files(ext=".pdf")) # ['C:\\Example.pdf', # 'C:\\how_to_program.pdf', # 'C:\\Hunks_and_Chicks.pdf'] list(reusables.find_files(name="*chris*")) # ['C:\\Christmas_card.docx', # 'C:\\chris_stuff.zip'] :param directory: Top location to recursively search for matching files :param ext: Extensions of the file you are looking for :param name: Part of the file name :param match_case: If name or ext has to be a direct match or not :param disable_glob: Do not look for globable names or use glob magic check :param depth: How many directories down to search :param abspath: Return files with their absolute paths :param enable_scandir: on python < 3.5 enable external scandir package :return: generator of all files in the specified directory """ if ext or not name: disable_glob = True if not disable_glob: disable_glob = not glob.has_magic(name) if ext and isinstance(ext, str): ext = [ext] elif ext and not isinstance(ext, (list, tuple)): raise TypeError("extension must be either one extension or a list") if abspath: directory = os.path.abspath(directory) starting_depth = directory.count(os.sep) for root, dirs, files in _walk(directory, enable_scandir=enable_scandir): if depth and root.count(os.sep) - starting_depth >= depth: continue if not disable_glob: if match_case: raise ValueError("Cannot use glob and match case, please " "either disable glob or not set match_case") glob_generator = glob.iglob(os.path.join(root, name)) for item in glob_generator: yield item continue for file_name in files: if ext: for end in ext: if file_name.lower().endswith(end.lower() if not match_case else end): break else: continue if name: if match_case and name not in file_name: continue elif name.lower() not in file_name.lower(): continue yield os.path.join(root, file_name)
Walk through a file directory and return an iterator of files that match requirements. Will autodetect if name has glob as magic characters. Note: For the example below, you can use find_files_list to return as a list, this is simply an easy way to show the output. .. code:: python list(reusables.find_files(name="ex", match_case=True)) # ['C:\\example.pdf', # 'C:\\My_exam_score.txt'] list(reusables.find_files(name="*free*")) # ['C:\\my_stuff\\Freedom_fight.pdf'] list(reusables.find_files(ext=".pdf")) # ['C:\\Example.pdf', # 'C:\\how_to_program.pdf', # 'C:\\Hunks_and_Chicks.pdf'] list(reusables.find_files(name="*chris*")) # ['C:\\Christmas_card.docx', # 'C:\\chris_stuff.zip'] :param directory: Top location to recursively search for matching files :param ext: Extensions of the file you are looking for :param name: Part of the file name :param match_case: If name or ext has to be a direct match or not :param disable_glob: Do not look for globable names or use glob magic check :param depth: How many directories down to search :param abspath: Return files with their absolute paths :param enable_scandir: on python < 3.5 enable external scandir package :return: generator of all files in the specified directory
def sync(self): """ Syncs the parent app changes with the current app instance. :return: Synced App object. """ app = self._api.post(url=self._URL['sync'].format(id=self.id)).json() return App(api=self._api, **app)
Syncs the parent app changes with the current app instance. :return: Synced App object.