def img_opacity(image, opacity):
    assert 0 <= opacity <= 1, 'Opacity must be a float between 0 and 1'
    assert os.path.isfile(image), 'Image is not a file'
    im = Image.open(image)
    if im.mode != 'RGBA':
        im = im.convert('RGBA')
    else:
        im = im.copy()
    alpha = im.split()[3]
    alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
    im.putalpha(alpha)
    dst = _add_suffix(image, str(int(opacity * 100)) + '%', ext='.png')
    im.save(dst)
    return dst
Reduce the opacity of a PNG image. Inspiration: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/362879 :param image: PNG image file :param opacity: float between 0 and 1 representing the target opacity :return: Path to modified PNG
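A minimal usage sketch, assuming Pillow is installed; the input path 'logo.png' and the `_add_suffix` helper shown here are hypothetical stand-ins for the module's own:

import os
from PIL import Image, ImageEnhance

def _add_suffix(path, suffix, ext='.png'):
    # Hypothetical helper: 'logo.png' + '50%' -> 'logo_50%.png'
    root, _ = os.path.splitext(path)
    return '%s_%s%s' % (root, suffix, ext)

dst = img_opacity('logo.png', 0.5)  # writes 'logo_50%.png' at half opacity
print(dst)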
def _build_block_element_list(self):
    return sorted(
        [e for e in self.block_elements.values() if not e.virtual],
        key=lambda e: e.priority,
        reverse=True
    )
Return a list of block elements, ordered from highest priority to lowest.
def mpl_to_bokeh(properties):
    new_properties = {}
    for k, v in properties.items():
        if isinstance(v, dict):
            new_properties[k] = v
        elif k == 's':
            new_properties['size'] = v
        elif k == 'marker':
            new_properties.update(markers.get(v, {'marker': v}))
        elif (k == 'color' or k.endswith('_color')) and not isinstance(v, (dict, dim)):
            with abbreviated_exception():
                v = COLOR_ALIASES.get(v, v)
            if isinstance(v, tuple):
                with abbreviated_exception():
                    v = rgb2hex(v)
            new_properties[k] = v
        else:
            new_properties[k] = v
    new_properties.pop('cmap', None)
    return new_properties
Utility to process style properties, converting any matplotlib-specific options to their nearest Bokeh equivalents.
def get_twitter_id(self, cache=True):
    if not (cache and ('twitter' in self.cache)):
        response = self.get_attribute('twitter')
        self.cache['twitter'] = response['artist'].get('twitter')
    return self.cache['twitter']
Get the twitter id for this artist if it exists. Args: Kwargs: cache (bool): Whether the cached value should be used, if available. Defaults to True. Returns: A twitter ID string Example: >>> a = artist.Artist('big boi') >>> a.get_twitter_id() u'BigBoi' >>>
def read(self, location):
    location = os.path.expanduser(location)
    for e in ENCODINGS:
        try:
            with codecs.open(location, 'r', e) as f:
                return f.read(), e
        except UnicodeDecodeError:
            pass
    raise Exception('Unable to open file: %r' % location)
Read file from disk.
def _add_form_fields(obj, lines):
    lines.append("**Form fields:**")
    lines.append("")
    for name, field in obj.base_fields.items():
        field_type = "{}.{}".format(field.__class__.__module__, field.__class__.__name__)
        tpl = "* ``{name}``: {label} (:class:`~{field_type}`)"
        lines.append(tpl.format(
            name=name,
            field=field,
            label=field.label or name.replace('_', ' ').title(),
            field_type=field_type
        ))
Improve the documentation of a Django Form class. This highlights the available fields in the form.
def getfiles(qfiles, dirname, names):
    for name in names:
        fullname = os.path.join(dirname, name)
        # 'and' binds tighter than 'or', so the suffix test needs parentheses
        # to require isfile for both extensions.
        if os.path.isfile(fullname) and \
                (fullname.endswith('.cf') or fullname.endswith('.post')):
            qfiles.put(fullname)
Get rule files in a directory
def selected_classification(self):
    item = self.lstClassifications.currentItem()
    try:
        return definition(item.data(QtCore.Qt.UserRole))
    except (AttributeError, NameError):
        return None
Obtain the classification selected by user. :returns: Metadata of the selected classification. :rtype: dict, None
def solve_sweep_wavelength(self, structure, wavelengths,
                           filename="wavelength_n_effs.dat", plot=True):
    n_effs = []
    for w in tqdm.tqdm(wavelengths, ncols=70):
        structure.change_wavelength(w)
        self.solve(structure)
        n_effs.append(np.real(self.n_effs))
    if filename:
        self._write_n_effs_to_file(n_effs, self._modes_directory + filename,
                                   wavelengths)
        if plot:
            if MPL:
                title = "$n_{eff}$ vs Wavelength"
                y_label = "$n_{eff}$"
            else:
                title = "n_{eff} vs Wavelength"
                y_label = "n_{eff}"
            self._plot_n_effs(
                self._modes_directory + filename,
                self._modes_directory + "fraction_te.dat",
                "Wavelength",
                y_label,
                title,
            )
    return n_effs
Solve for the effective indices of a fixed structure at different wavelengths. Args: structure (Slabs): The target structure to solve for modes. wavelengths (list): A list of wavelengths to sweep over. filename (str): The nominal filename to use when saving the effective indices. Defaults to 'wavelength_n_effs.dat'. plot (bool): `True` if plots should be generated, otherwise `False`. Default is `True`. Returns: list: A list of the effective indices found for each wavelength.
def equivalent_crust_cohesion(self):
    deprecation("Will be moved to a function")
    if len(self.layers) > 1:
        crust = self.layer(0)
        crust_phi_r = np.radians(crust.phi)
        equivalent_cohesion = crust.cohesion + crust.k_0 * self.crust_effective_unit_weight * \
            self.layer_depth(1) / 2 * np.tan(crust_phi_r)
        return equivalent_cohesion
Calculate the equivalent crust cohesion strength according to Karamitros et al. 2013 sett, pg 8 eq. 14 :return: equivalent cohesion [Pa]
def get_section2items(self, itemkey):
    sec_items = []
    section2usrnts = self.get_section2usrnts()
    for section, usrnts in section2usrnts.items():
        items = set([e for nt in usrnts for e in getattr(nt, itemkey, set())])
        sec_items.append((section, items))
    return cx.OrderedDict(sec_items)
Collect all items into a single set per section.
def create(gandi, private_key, certificate, certificate_id):
    if not certificate and not certificate_id:
        gandi.echo('One of --certificate or --certificate-id is needed.')
        return
    if certificate and certificate_id:
        gandi.echo('Only one of --certificate or --certificate-id is needed.')
    if os.path.isfile(private_key):
        with open(private_key) as fhandle:
            private_key = fhandle.read()
    if certificate:
        if os.path.isfile(certificate):
            with open(certificate) as fhandle:
                certificate = fhandle.read()
    else:
        cert = gandi.certificate.info(certificate_id)
        certificate = gandi.certificate.pretty_format_cert(cert)
    result = gandi.hostedcert.create(private_key, certificate)
    output_keys = ['id', 'subject', 'date_created', 'date_expire', 'fqdns', 'vhosts']
    output_hostedcert(gandi, result, output_keys)
    return result
Create a new hosted certificate.
def _sync_outlineexplorer_file_order(self):
    if self.outlineexplorer is not None:
        self.outlineexplorer.treewidget.set_editor_ids_order(
            [finfo.editor.get_document_id() for finfo in self.data])
Order the root file items of the outline explorer as in the tabbar of the current EditorStack.
def fetch_request_ids(item_ids, cls, attr_name, verification_list=None):
    if not item_ids:
        return []
    items = []
    for item_id in item_ids:
        item = cls.fetch_by_id(item_id)
        if not item or (verification_list is not None and item not in verification_list):
            raise InvalidId(attr_name)
        items.append(item)
    return items
Return a list of cls instances for all the ids provided in item_ids. :param item_ids: The list of ids to fetch objects for :param cls: The class to fetch the ids from :param attr_name: The name of the attribute for exception purposes :param verification_list: If provided, a list of acceptable instances Raise InvalidId exception using attr_name if any do not exist, or are not present in the verification_list.
def constraints(self, chunk):
    a = [self._map1[w.index] for w in chunk.words if w.index in self._map1]
    # Preserve order while dropping duplicates.
    b = []
    for constraint in a:
        if constraint not in b:
            b.append(constraint)
    return b
Returns a list of constraints that match the given Chunk.
def outlier_cutoff(a, threshold=3.5):
    A = np.array(a, dtype=float)
    M = np.median(A)
    D = np.absolute(A - M)
    MAD = np.median(D)
    C = threshold / .67449 * MAD
    return M - C, M + C
Iglewicz and Hoaglin's robust outlier test; returns the cutoff values (lower bound and upper bound).
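A point is an outlier under Iglewicz and Hoaglin's modified z-score when 0.6745 * |x - median| / MAD exceeds the threshold, which is exactly the bound this function inverts. A quick sketch with made-up data, assuming only numpy:

import numpy as np

data = [9.8, 10.1, 10.0, 9.9, 10.2, 25.0]  # 25.0 is an obvious outlier
low, high = outlier_cutoff(data, threshold=3.5)
flagged = [x for x in data if x < low or x > high]
print(flagged)  # [25.0]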
def memory_data():
    vm = psutil.virtual_memory()
    sw = psutil.swap_memory()
    return {
        'virtual': {
            'total': mark(vm.total, 'bytes'),
            'free': mark(vm.free, 'bytes'),
            'percent': mark(vm.percent, 'percentage')
        },
        'swap': {
            'total': mark(sw.total, 'bytes'),
            'free': mark(sw.free, 'bytes'),
            'percent': mark(sw.percent, 'percentage')
        },
    }
Returns memory data.
def center_land(world):
    y_sums = world.layers['elevation'].data.sum(1)
    y_with_min_sum = y_sums.argmin()
    if get_verbose():
        print("geo.center_land: height complete")
    x_sums = world.layers['elevation'].data.sum(0)
    x_with_min_sum = x_sums.argmin()
    if get_verbose():
        print("geo.center_land: width complete")
    latshift = 0
    world.layers['elevation'].data = numpy.roll(
        numpy.roll(world.layers['elevation'].data, -y_with_min_sum + latshift, axis=0),
        -x_with_min_sum, axis=1)
    world.layers['plates'].data = numpy.roll(
        numpy.roll(world.layers['plates'].data, -y_with_min_sum + latshift, axis=0),
        -x_with_min_sum, axis=1)
    if get_verbose():
        print("geo.center_land: width complete")
Translate the map horizontally and vertically to put as much ocean as possible at the borders. It operates on the elevation and plates maps.
def SUB(classical_reg, right):
    left, right = unpack_reg_val_pair(classical_reg, right)
    return ClassicalSub(left, right)
Produce a SUB instruction. :param classical_reg: Left operand for the arithmetic operation. Also serves as the store target. :param right: Right operand for the arithmetic operation. :return: A ClassicalSub instance.
def analyze(self, webpage):
    detected_apps = set()
    for app_name, app in self.apps.items():
        if self._has_app(app, webpage):
            detected_apps.add(app_name)
    detected_apps |= self._get_implied_apps(detected_apps)
    return detected_apps
Return the set of applications that can be detected on the web page.
def add_options(cls, manager):
    kw = {}
    if flake8.__version__ >= '3.0.0':
        kw['parse_from_config'] = True
    manager.add_option(
        "--known-modules",
        action='store',
        default="",
        help=(
            "User defined mapping between a project name and a list of"
            " provided modules. For example: ``--known-modules=project:"
            "[Project],extra-project:[extras,utilities]``."
        ),
        **kw
    )
Register plug-in specific options.
def extract_data(data_path, feature):
    population = nm.load_neurons(data_path)
    feature_data = [nm.get(feature, n) for n in population]
    feature_data = list(chain(*feature_data))
    return stats.optimal_distribution(feature_data)
Loads a list of neurons, extracts the feature and transforms the fitted distribution into the correct format. Returns the optimal distribution, corresponding parameters, minimum and maximum values.
def one_or_none(self):
    if self._metadata is not None:
        raise RuntimeError(
            "Can not call `.one` or `.one_or_none` after "
            "stream consumption has already started."
        )
    iterator = iter(self)
    try:
        answer = next(iterator)
    except StopIteration:
        return None
    try:
        next(iterator)
        raise ValueError("Expected one result; got more.")
    except StopIteration:
        return answer
Return exactly one result, or None if there are no results. :raises: :exc:`ValueError`: If there are multiple results. :raises: :exc:`RuntimeError`: If consumption has already occurred, in whole or in part.
def start_watcher(conf, watcher_plugin_class, health_plugin_class,
                  iterations=None, sleep_time=1):
    if CURRENT_STATE._stop_all:
        logging.debug("Not starting plugins: Global stop")
        return
    watcher_plugin, health_plugin = \
        start_plugins(conf, watcher_plugin_class, health_plugin_class, sleep_time)
    CURRENT_STATE.add_plugin(watcher_plugin)
    CURRENT_STATE.add_plugin(health_plugin)
    _event_monitor_loop(conf['region_name'], conf['vpc_id'],
                        watcher_plugin, health_plugin,
                        iterations, sleep_time,
                        conf['route_recheck_interval'])
    stop_plugins(watcher_plugin, health_plugin)
Start watcher loop, listening for config changes or failed hosts. Also starts the various service threads. VPC router watches for any changes in the config and updates/adds/deletes routes as necessary. If failed hosts are reported, routes are also updated as needed. This function starts a few working threads: - The watcher plugin to monitor for updated route specs. - A health monitor plugin for instances mentioned in the route spec. It then drops into a loop to receive messages from the health monitoring thread and watcher plugin and re-process the config if any failed IPs are reported. The loop itself is in its own function to facilitate easier testing.
def regular_index(*dfs):
    original_index = [df.index for df in dfs]
    have_bad_index = [not isinstance(df.index, pd.RangeIndex) for df in dfs]
    for df, bad in zip(dfs, have_bad_index):
        if bad:
            df.reset_index(drop=True, inplace=True)
    try:
        yield dfs
    finally:
        for df, bad, idx in zip(dfs, have_bad_index, original_index):
            if bad and len(df.index) == len(idx):
                df.index = idx
Change & restore the indices of dataframes. Dataframes with duplicate index values can be hard to work with. When split and recombined, you cannot restore the row order. This can be the case even if the index is unique but irregular/unordered. This contextmanager resets the irregular indices of any dataframe passed to it; on exit it restores the original index. A regular index is of the form:: RangeIndex(start=0, stop=n, step=1) Parameters ---------- dfs : tuple Dataframes Yields ------ dfs : tuple Dataframes Examples -------- Create dataframes with different indices >>> df1 = pd.DataFrame([4, 3, 2, 1]) >>> df2 = pd.DataFrame([3, 2, 1], index=[3, 0, 0]) >>> df3 = pd.DataFrame([11, 12, 13], index=[11, 12, 13]) Within the contextmanager all frames have nice range indices >>> with regular_index(df1, df2, df3): ... print(df1.index) ... print(df2.index) ... print(df3.index) RangeIndex(start=0, stop=4, step=1) RangeIndex(start=0, stop=3, step=1) RangeIndex(start=0, stop=3, step=1) Indices restored >>> df1.index RangeIndex(start=0, stop=4, step=1) >>> df2.index Int64Index([3, 0, 0], dtype='int64') >>> df3.index Int64Index([11, 12, 13], dtype='int64')
def get(self, key, def_val=None):
    assert isinstance(key, basestring)
    return dict.get(self, key.lower(), def_val)
Return the value stored under the lowercased key, or def_val if missing.
def contains(self, val):
    (start, end) = self.__val_convert(val)
    retlen = 0
    for r in self.__has:
        if start < r[1] and end > r[0]:
            retlen += ((end < r[1] and end) or r[1]) - ((start > r[0] and start) or r[0])
    return retlen
Check if given value or range is present. Parameters ---------- val : int or tuple or list or range Range or integer being checked. Returns ------- retlen : int Total length of the overlap between `val` and the contained subranges.
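The `and`/`or` chains above are an old-style spelling of min/max: the overlap with each stored subrange is min(end, r[1]) - max(start, r[0]). A standalone sketch of the same arithmetic, with hypothetical subranges:

def overlap_length(start, end, subranges):
    # Sum the intersection lengths of [start, end) with each (lo, hi) subrange.
    total = 0
    for lo, hi in subranges:
        if start < hi and end > lo:
            total += min(end, hi) - max(start, lo)
    return total

print(overlap_length(5, 15, [(0, 10), (12, 20)]))  # 5 + 3 = 8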
def _has_streamhandler(logger, level=None, fmt=LOG_FORMAT, stream=DEFAULT_STREAM):
    if isinstance(level, basestring):
        level = logging.getLevelName(level)
    for handler in logger.handlers:
        if not isinstance(handler, logging.StreamHandler):
            continue
        if handler.stream is not stream:
            continue
        if handler.level != level:
            continue
        if not handler.formatter or handler.formatter._fmt != fmt:
            continue
        return True
    return False
Check the named logger for an appropriate existing StreamHandler. This only returns True if a StreamHandler that exactly matches our specification is found. If other StreamHandlers are seen, we assume they were added for a different purpose.
def is_empty(bam_file):
    bam_file = objectstore.cl_input(bam_file)
    cmd = ("set -o pipefail; "
           "samtools view {bam_file} | head -1 | wc -l")
    p = subprocess.Popen(cmd.format(**locals()), shell=True,
                         executable=do.find_bash(),
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         preexec_fn=lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL))
    stdout, stderr = p.communicate()
    stdout = stdout.decode()
    stderr = stderr.decode()
    if ((p.returncode == 0 or p.returncode == 141) and
            (stderr == "" or (stderr.startswith("gof3r") and stderr.endswith("broken pipe")))):
        return int(stdout) == 0
    else:
        raise ValueError("Failed to check empty status of BAM file: %s" % str(stderr))
Determine if a BAM file is empty
def setup_catalog_mappings(portal):
    logger.info("*** Setup Catalog Mappings ***")
    at = api.get_tool("archetype_tool")
    for portal_type, catalogs in CATALOG_MAPPINGS:
        at.setCatalogsByType(portal_type, catalogs)
Setup portal_type -> catalog mappings
def func_source_data(func):
    filename = inspect.getsourcefile(func)
    lineno = inspect.getsourcelines(func)[1]
    source = inspect.getsource(func)
    return filename, lineno, source
Return data about a function source, including file name, line number, and source code. Parameters ---------- func : object May be anything supported by the inspect module, such as a function, method, or class. Returns ------- filename : str lineno : int The line number on which the function starts. source : str
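A usage sketch; note that `inspect.getsource` needs the function to live in a real file, so this will not work from an interactive session:

def example():
    return 42

filename, lineno, source = func_source_data(example)
print(filename, lineno)  # path to this file and the line 'def example():' starts on
print(source)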
def _get_param(self, section, key, config, default=None):
    if section in config and key in config[section]:
        return config[section][key]
    if default is not None:
        return default
    else:
        raise MissingParameter(section, key)
Get configuration parameter "key" from config @str section: the section of the config file @str key: the key to get @dict config: the configuration (dictionary) @str default: the default value if parameter "key" is not present @rtype: str (value of config[section][key] if present, the default otherwise)
def _put(self, rtracker):
    with self._lock:
        if self._available < self.capacity:
            for i in self._unavailable_range():
                if self._reference_queue[i] is rtracker:
                    break
            else:
                raise UnknownResourceError
            j = self._resource_end
            rq = self._reference_queue
            rq[i], rq[j] = rq[j], rq[i]
            self._resource_end = (self._resource_end + 1) % self.maxsize
            self._available += 1
            self._not_empty.notify()
        else:
            raise PoolFullError
Put a resource back in the queue. :param rtracker: A resource. :type rtracker: :class:`_ResourceTracker` :raises PoolFullError: If pool is full. :raises UnknownResourceError: If resource can't be found.
def recent(self):
    kwd = {
        'pager': '',
        'title': 'Recent Pages',
    }
    self.render('wiki_page/wiki_list.html',
                view=MWiki.query_recent(),
                format_date=tools.format_date,
                kwd=kwd,
                userinfo=self.userinfo)
List recent wiki pages.
def plot(self, **kwds):
    ax = plt.gca()
    self.coords.plot.scatter(x=0, y=1, ax=ax, **kwds)
    ax.get_xaxis().set_major_locator(MultipleLocator(base=1.0))
    ax.get_yaxis().set_major_locator(MultipleLocator(base=1.0))
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.set_xlabel('')
    ax.set_ylabel('')
    ax.set_aspect(1)
    return ax
Plot the coordinates in the first two dimensions of the projection. Removes axis and tick labels, and sets the grid spacing to 1 unit. One way to display the grid is to use `Seaborn`_: Args: **kwds: Passed to :py:meth:`pandas.DataFrame.plot.scatter`. Examples: >>> from pymds import DistanceMatrix >>> import pandas as pd >>> import seaborn as sns >>> sns.set_style('whitegrid') >>> dist = pd.DataFrame({ ... 'a': [0.0, 1.0, 2.0], ... 'b': [1.0, 0.0, 3 ** 0.5], ... 'c': [2.0, 3 ** 0.5, 0.0]} , index=['a', 'b', 'c']) >>> dm = DistanceMatrix(dist) >>> pro = dm.optimize() >>> ax = pro.plot(c='black', s=50, edgecolor='white') Returns: :py:obj:`matplotlib.axes.Axes` .. _Seaborn: https://seaborn.pydata.org/
def get_version():
    config = RawConfigParser()
    config.read(os.path.join('..', 'setup.cfg'))
    return config.get('metadata', 'version')
Return package version from setup.cfg
def _get_record(self, model_class, record_id):
    url = '{host}/{namespace}/{model}/{id}'.format(
        host=self._host,
        namespace=self._namespace,
        model=self._translate_name(model_class.__name__),
        id=record_id
    )
    data = self._get_json(url)['data']
    fresh_model = model_class(data['attributes'])
    fresh_model.id = data['id']
    fresh_model.validate()
    if self._cache is not None:
        self._cache.set_record(model_class.__name__, fresh_model.id, fresh_model)
    return fresh_model
Get a single record from the API. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. record_id (int): The id of the record requested. Returns: :class:`cinder_data.model.CinderModel`: An instance of model_class or None.
def set_sdk_enabled(cls, value):
    if cls.XRAY_ENABLED_KEY in os.environ:
        cls.__SDK_ENABLED = str(os.getenv(cls.XRAY_ENABLED_KEY, 'true')).lower() != 'false'
    else:
        if type(value) == bool:
            cls.__SDK_ENABLED = value
        else:
            cls.__SDK_ENABLED = True
            log.warning("Invalid parameter type passed into set_sdk_enabled(). Defaulting to True...")
Modifies the enabled flag if the "AWS_XRAY_SDK_ENABLED" environment variable is not set; otherwise, sets the enabled flag to be equal to the environment variable. If the env variable is an invalid string boolean, it will default to true. :param bool value: Flag to set whether the SDK is enabled or disabled. The AWS_XRAY_SDK_ENABLED environment variable overrides the argument value.
def nonnegative_float(s):
    err_msg = "must be either positive or zero, not %r" % s
    try:
        value = float(s)
    except ValueError:
        raise argparse.ArgumentTypeError(err_msg)
    if value < 0:
        raise argparse.ArgumentTypeError(err_msg)
    return value
Ensure argument is a positive real number or zero and return it as float. To be used as type in argparse arguments.
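A sketch of the intended argparse wiring; the `--weight` flag is hypothetical:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--weight', type=nonnegative_float, default=1.0)

print(parser.parse_args(['--weight', '2.5']).weight)  # 2.5
# parser.parse_args(['--weight', '-1']) exits with:
# error: argument --weight: must be either positive or zero, not '-1'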
def first_active(self):
    result = None
    for actor in self.actors:
        if not actor.skip:
            result = actor
            break
    return result
Returns the first non-skipped actor. :return: the first active actor, None if not available :rtype: Actor
def _check_env_var(envvar: str) -> bool:
    if os.getenv(envvar) is None:
        raise KeyError(
            "Required ENVVAR: {0} is not set".format(envvar))
    if not os.getenv(envvar):
        raise KeyError(
            "Required ENVVAR: {0} is empty".format(envvar))
    return True
Check Environment Variable to verify that it is set and not empty. :param envvar: Environment Variable to Check. :returns: True if Environment Variable is set and not empty. :raises: KeyError if Environment Variable is not set or is empty. .. versionadded:: 0.0.12
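A usage sketch; the variable names are hypothetical:

import os

os.environ['API_TOKEN'] = 'secret'
print(_check_env_var('API_TOKEN'))  # True

try:
    _check_env_var('MISSING_VAR')
except KeyError as exc:
    print(exc)  # 'Required ENVVAR: MISSING_VAR is not set'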
def html2rst(html_string, force_headers=False,
             center_cells=False, center_headers=False):
    if os.path.isfile(html_string):
        file = open(html_string, 'r', encoding='utf-8')
        lines = file.readlines()
        file.close()
        html_string = ''.join(lines)
    table_data, spans, use_headers = html2data(html_string)
    if table_data == '':
        return ''
    if force_headers:
        use_headers = True
    return data2rst(table_data, spans, use_headers, center_cells, center_headers)
Convert a string or html file to an rst table string. Parameters ---------- html_string : str Either the html string, or the filepath to the html force_headers : bool Make the first row become headers, whether or not they are headers in the html file. center_cells : bool Whether or not to center the contents of the cells center_headers : bool Whether or not to center the contents of the header cells Returns ------- str The html table converted to an rst grid table Notes ----- This function **requires** BeautifulSoup_ to work. Example ------- >>> html_text = ''' ... <table> ... <tr> ... <th> ... Header 1 ... </th> ... <th> ... Header 2 ... </th> ... <th> ... Header 3 ... </th> ... <tr> ... <td> ... <p>This is a paragraph</p> ... </td> ... <td> ... <ul> ... <li>List item 1</li> ... <li>List item 2</li> ... </ul> ... </td> ... <td> ... <ol> ... <li>Ordered 1</li> ... <li>Ordered 2</li> ... </ol> ... </td> ... </tr> ... </table> ... ''' >>> import dashtable >>> print(dashtable.html2rst(html_text)) +---------------------+----------------+--------------+ | Header 1 | Header 2 | Header 3 | +=====================+================+==============+ | This is a paragraph | - List item 1 | #. Ordered 1 | | | - List item 2 | #. Ordered 2 | +---------------------+----------------+--------------+ .. _BeautifulSoup: https://www.crummy.com/software/BeautifulSoup/
def unpack(packet):
    validate_packet(packet)
    version = packet[0]
    try:
        pyof_lib = PYOF_VERSION_LIBS[version]
    except KeyError:
        raise UnpackException('Version not supported')
    try:
        message = pyof_lib.common.utils.unpack_message(packet)
        return message
    except (UnpackException, ValueError) as exception:
        raise UnpackException(exception)
Unpack the OpenFlow Packet and returns a message. Args: packet: buffer with the openflow packet. Returns: GenericMessage: Message unpacked based on openflow packet. Raises: UnpackException: if the packet can't be unpacked.
def first(self, offset):
    if not isinstance(self.index, DatetimeIndex):
        raise TypeError("'first' only supports a DatetimeIndex index")
    if len(self.index) == 0:
        return self
    offset = to_offset(offset)
    end_date = end = self.index[0] + offset
    if not offset.isAnchored() and hasattr(offset, '_inc'):
        if end_date in self.index:
            end = self.index.searchsorted(end_date, side='left')
            return self.iloc[:end]
    return self.loc[:end]
Convenience method for subsetting initial periods of time series data based on a date offset. Parameters ---------- offset : string, DateOffset, dateutil.relativedelta Returns ------- subset : same type as caller Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- last : Select final periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the first 3 days: >>> ts.first('3D') A 2018-04-09 1 2018-04-11 2 Notice that the data for the first 3 calendar days were returned, not the first 3 days observed in the dataset, and therefore data for 2018-04-13 was not returned.
def confirmation(self, pdu):
    if _debug:
        StreamToPacket._debug("StreamToPacket.confirmation %r", pdu)
    for packet in self.packetize(pdu, self.upstreamBuffer):
        self.response(packet)
Message going upstream.
def _ParseEntryArrayObject(self, file_object, file_offset):
    entry_array_object_map = self._GetDataTypeMap(
        'systemd_journal_entry_array_object')
    try:
        entry_array_object, _ = self._ReadStructureFromFileObject(
            file_object, file_offset, entry_array_object_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError((
            'Unable to parse entry array object at offset: 0x{0:08x} with error: '
            '{1!s}').format(file_offset, exception))
    if entry_array_object.object_type != self._OBJECT_TYPE_ENTRY_ARRAY:
        raise errors.ParseError('Unsupported object type: {0:d}.'.format(
            entry_array_object.object_type))
    if entry_array_object.object_flags != 0:
        raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format(
            entry_array_object.object_flags))
    return entry_array_object
Parses an entry array object. Args: file_object (dfvfs.FileIO): a file-like object. file_offset (int): offset of the entry array object relative to the start of the file-like object. Returns: systemd_journal_entry_array_object: entry array object. Raises: ParseError: if the entry array object cannot be parsed.
def registrations(self):
    if self._registrations is None:
        self._registrations = AuthTypeRegistrationsList(
            self._version,
            account_sid=self._solution['account_sid'],
            domain_sid=self._solution['domain_sid'],
        )
    return self._registrations
Access the registrations :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.AuthTypeRegistrationsList :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.AuthTypeRegistrationsList
def partition_by_vid(self, ref):
    from ambry.orm import Partition
    p = self.session.query(Partition).filter(Partition.vid == str(ref)).first()
    if p:
        return self.wrap_partition(p)
    else:
        return None
A much faster way to get partitions, by vid only
def add_filter(self, filter_):
    assert has_pil, _("Cannot add filters without python PIL")
    self.cache.basename += filter_.basename
    self._filters.append(filter_)
Add an image filter for post-processing
def _hashi_weight_generator(self, node_name, node_conf):
    ks = (node_conf['vnodes'] * len(self._nodes) * node_conf['weight']) // self._weight_sum
    for w in range(0, ks):
        w_node_name = '%s-%s' % (node_name, w)
        for i in range(0, self._replicas):
            yield self.hashi(w_node_name, replica=i)
Calculate the weight factor of the given node and yield its hash key for every configured replica. :param node_name: the node name.
def generate_argument_parser(cls, tree, actions={}):
    cur_as, cur_subas = tree
    parser = devassistant_argparse.ArgumentParser(argument_default=argparse.SUPPRESS,
                                                  usage=argparse.SUPPRESS,
                                                  add_help=False)
    cls.add_default_arguments_to(parser)
    for arg in cur_as.args:
        arg.add_argument_to(parser)
    if cur_subas or actions:
        subparsers = cls._add_subparsers_required(
            parser, dest=settings.SUBASSISTANT_N_STRING.format('0'))
        for subas in sorted(cur_subas, key=lambda x: x[0].name):
            for alias in [subas[0].name] + getattr(subas[0], 'aliases', []):
                cls.add_subassistants_to(subparsers, subas, level=1, alias=alias)
        for action, subactions in sorted(actions.items(), key=lambda x: x[0].name):
            cls.add_action_to(subparsers, action, subactions, level=1)
    return parser
Generates argument parser for given assistant tree and actions. Args: tree: assistant tree as returned by devassistant.assistant_base.AssistantBase.get_subassistant_tree actions: dict mapping actions (devassistant.actions.Action subclasses) to their subaction dicts Returns: instance of devassistant_argparse.ArgumentParser (subclass of argparse.ArgumentParser)
def remove_log_action(portal):
    logger.info("Removing Log Tab ...")
    portal_types = api.get_tool("portal_types")
    for name in portal_types.listContentTypes():
        ti = portal_types[name]
        actions = map(lambda action: action.id, ti._actions)
        for index, action in enumerate(actions):
            if action == "log":
                logger.info("Removing Log Action for {}".format(name))
                ti.deleteActions([index])
                break
    logger.info("Removing Log Tab [DONE]")
Removes the old Log action from types
def get_table_key(key, d, fallback=""):
    try:
        var = d[key]
        return var
    except KeyError:
        logger_misc.info("get_variable_name_table: KeyError: missing {}, use name: {}".format(key, fallback))
        return fallback
Try to get a table name from a data table :param str key: Key to try first :param dict d: Data table :param str fallback: (optional) If we don't find a table name, use this as a generic name fallback. :return str var: Data table name
def do_bb_intersect(a, b):
    return a.p1.x <= b.p2.x \
        and a.p2.x >= b.p1.x \
        and a.p1.y <= b.p2.y \
        and a.p2.y >= b.p1.y
Check if BoundingBox a intersects with BoundingBox b.
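A self-contained sketch with hypothetical point/box containers, assuming p1 is the lower-left corner and p2 the upper-right:

from collections import namedtuple

Point = namedtuple('Point', 'x y')
BoundingBox = namedtuple('BoundingBox', 'p1 p2')  # p1: lower-left, p2: upper-right

a = BoundingBox(Point(0, 0), Point(4, 4))
b = BoundingBox(Point(3, 3), Point(6, 6))  # overlaps a at the corner
c = BoundingBox(Point(5, 5), Point(7, 7))  # disjoint from a

print(do_bb_intersect(a, b))  # True
print(do_bb_intersect(a, c))  # False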
def model_factory(schema, resolver=None, base_class=model.Model, name=None):
    schema = copy.deepcopy(schema)
    resolver = resolver

    class Model(base_class):
        def __init__(self, *args, **kwargs):
            self.__dict__['schema'] = schema
            self.__dict__['resolver'] = resolver
            base_class.__init__(self, *args, **kwargs)

    if resolver is not None:
        Model.resolver = resolver
    if name is not None:
        Model.__name__ = name
    elif 'name' in schema:
        Model.__name__ = str(schema['name'])
    return Model
Generate a model class based on the provided JSON Schema :param schema: dict representing valid JSON schema :param resolver: optional schema reference resolver to attach to the class :param name: A name to give the class, if `name` is not in `schema`
def new_bundle(self, name: str, created_at: dt.datetime = None) -> models.Bundle:
    new_bundle = self.Bundle(name=name, created_at=created_at)
    return new_bundle
Create a new file bundle.
def _Assign(self, t):
    self._fill()
    for target in t.nodes:
        self._dispatch(target)
        self._write(" = ")
    self._dispatch(t.expr)
    if not self._do_indent:
        self._write('; ')
Expression Assignment such as "a = 1". This only handles assignment in expressions. Keyword assignment is handled separately.
def security_errors(self):
    errors = ErrorDict()
    for f in ["honeypot", "timestamp", "security_hash"]:
        if f in self.errors:
            errors[f] = self.errors[f]
    return errors
Return just those errors associated with security
def rows(self, cell_mode=CellMode.cooked):
    for row_index in range(self.nrows):
        yield self.parse_row(self.get_row(row_index), row_index, cell_mode)
Generates a sequence of parsed rows from the worksheet. The cells are parsed according to the cell_mode argument.
def call_spellchecker(cmd, input_text=None, encoding=None):
    process = get_process(cmd)
    if input_text is not None:
        for line in input_text.splitlines():
            offset = 0
            end = len(line)
            while True:
                chunk_end = offset + 0x1fff
                m = None if chunk_end >= end else RE_LAST_SPACE_IN_CHUNK.search(line, offset, chunk_end)
                if m:
                    chunk_end = m.start(1)
                    chunk = line[offset:m.start(1)]
                    offset = m.end(1)
                else:
                    chunk = line[offset:chunk_end]
                    offset = chunk_end
                if chunk and not chunk.isspace():
                    process.stdin.write(chunk + b'\n')
                if offset >= end:
                    break
    return get_process_output(process, encoding)
Call spell checker with arguments.
def _linreg_future(self, series, since, days=20):
    last_days = pd.date_range(end=since, periods=days)
    hist = self.history(last_days)
    xi = np.array(map(dt2ts, hist.index))
    A = np.array([xi, np.ones(len(hist))])
    y = hist.values
    w = np.linalg.lstsq(A.T, y)[0]
    for d in series.index[series.index > since]:
        series[d] = w[0] * dt2ts(d) + w[1]
        series[d] = 0 if series[d] < 0 else series[d]
    return series
Predicts future using linear regression. :param series: A series in which the values will be places. The index will not be touched. Only the values on dates > `since` will be predicted. :param since: The starting date from which the future will be predicted. :param days: Specifies how many past days should be used in the linear regression.
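The core of the prediction is an ordinary least-squares fit of value against timestamp. A standalone sketch of the same design-matrix setup, with made-up data:

import numpy as np

xi = np.array([1.0, 2.0, 3.0, 4.0])   # timestamps
y = np.array([2.0, 4.1, 5.9, 8.0])    # observed values
A = np.array([xi, np.ones(len(xi))])  # columns: slope term, intercept term
w = np.linalg.lstsq(A.T, y, rcond=None)[0]

predict = lambda t: max(w[0] * t + w[1], 0)  # clamp negatives, as the method does
print(round(predict(5.0), 1))  # ~10.0, extrapolated one step ahead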
def clone(self):
    base_dir = '/'.join(self.path.split('/')[:-2])
    try:
        os.makedirs(base_dir, 0o700)
    except OSError:
        pass
    self._cmd(['git', 'clone', self._clone_url, self.path], cwd=os.getcwd())
Clones a directory based on the clone_url and plugin_name given to the constructor. The clone will be located at self.path.
def _avro_schema(read_session):
    json_schema = json.loads(read_session.avro_schema.schema)
    column_names = tuple(field["name"] for field in json_schema["fields"])
    return fastavro.parse_schema(json_schema), column_names
Extract and parse Avro schema from a read session. Args: read_session ( \ ~google.cloud.bigquery_storage_v1beta1.types.ReadSession \ ): The read session associated with this read rows stream. This contains the schema, which is required to parse the data blocks. Returns: Tuple[fastavro.schema, Tuple[str]]: A parsed Avro schema, using :func:`fastavro.schema.parse_schema` and the column names for a read session.
def set_active_state(self, name, value):
    if name not in self.__active_states.keys():
        raise ValueError("Can not set unknown state '" + name + "'")
    if (isinstance(self.__active_states[name], int)
            and isinstance(value, str)):
        self.__active_states[name] = int(value)
    elif (isinstance(self.__active_states[name], float)
            and isinstance(value, str)):
        self.__active_states[name] = float(value)
    else:
        self.__active_states[name] = value
Set active state.
def get_task_positions_objs(client, list_id):
    params = {
        'list_id': int(list_id)
    }
    response = client.authenticated_request(client.api.Endpoints.TASK_POSITIONS, params=params)
    return response.json()
Gets a list containing the object that encapsulates information about the order lists are laid out in. This list will always contain exactly one object. See https://developer.wunderlist.com/documentation/endpoints/positions for more info Return: A list containing a single ListPositionsObj-mapped object
def join(self, table, one=None, operator=None, two=None, type='inner', where=False):
    if isinstance(table, JoinClause):
        self.joins.append(table)
    else:
        if one is None:
            raise ArgumentError('Missing "one" argument')
        join = JoinClause(table, type)
        self.joins.append(join.on(
            one, operator, two, 'and', where
        ))
    return self
Add a join clause to the query :param table: The table to join with, can also be a JoinClause instance :type table: str or JoinClause :param one: The first column of the join condition :type one: str :param operator: The operator of the join condition :type operator: str :param two: The second column of the join condition :type two: str :param type: The join type :type type: str :param where: Whether to use a "where" rather than a "on" :type where: bool :return: The current QueryBuilder instance :rtype: QueryBuilder
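A usage sketch, assuming `qb` is a builder instance; the table and column names are illustrative only:

# Inner join on a join condition, then a left join; join() returns self, so calls chain.
qb.join('contacts', 'users.id', '=', 'contacts.user_id') \
  .join('orders', 'users.id', '=', 'orders.user_id', type='left')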
def download_all(data_home=None, replace=False):
    for _, meta in DATASETS.items():
        download_data(
            meta['url'], meta['signature'], data_home=data_home, replace=replace
        )
    print(
        "Downloaded {} datasets to {}".format(len(DATASETS), get_data_home(data_home))
    )
Downloads all the example datasets to the data directory specified by ``get_data_home``. This function ensures that all datasets are available for use with the examples.
def pdftoxml(pdfdata, options=""):
    pdffout = tempfile.NamedTemporaryFile(suffix='.pdf')
    pdffout.write(pdfdata)
    pdffout.flush()
    xmlin = tempfile.NamedTemporaryFile(mode='r', suffix='.xml')
    tmpxml = xmlin.name
    cmd = 'pdftohtml -xml -nodrm -zoom 1.5 -enc UTF-8 -noframes %s "%s" "%s"' % (
        options, pdffout.name, os.path.splitext(tmpxml)[0])
    cmd = cmd + " >/dev/null 2>&1"
    os.system(cmd)
    pdffout.close()
    xmldata = xmlin.read()
    xmlin.close()
    return xmldata.decode('utf-8')
Converts a PDF file to an XML file using the pdftohtml command-line tool
def report_estimation_accuracy(request):
    contracts = ProjectContract.objects.filter(
        status=ProjectContract.STATUS_COMPLETE,
        type=ProjectContract.PROJECT_FIXED
    )
    data = [('Target (hrs)', 'Actual (hrs)', 'Point Label')]
    for c in contracts:
        if c.contracted_hours() == 0:
            continue
        pt_label = "%s (%.2f%%)" % (c.name, c.hours_worked / c.contracted_hours() * 100)
        data.append((c.contracted_hours(), c.hours_worked, pt_label))
    chart_max = max([max(x[0], x[1]) for x in data[1:]])
    return render(request, 'timepiece/reports/estimation_accuracy.html', {
        'data': json.dumps(data, cls=DecimalEncoder),
        'chart_max': chart_max,
    })
Idea from Software Estimation: Demystifying the Black Art, McConnell 2006, Fig 3-3.
def format_manager(cls):
    if cls._instance is None:
        cls._instance = cls().register_entrypoints()
    return cls._instance
Return the instance singleton, creating it if necessary
def paginated_retrieval(methodname, itemtype):
    return compose(
        reusable,
        basic_interaction,
        map_yield(partial(_params_as_get, methodname)),
    )
decorator factory for retrieval queries from query params
def load_data_and_build(self, filename, delimiter=","):
    data = np.genfromtxt(
        filename, dtype=float, delimiter=delimiter, names=True
    )
    data = data.view(np.float64).reshape(data.shape + (-1,))
    X = data[:, 0:-1]
    Y = data[:, -1]
    self.build(X=X, Y=Y)
Convenience function for directly working with a data file. This opens a file and reads the data into an array, sets the data as an nparray and list of dimnames @ In, filename, string representing the data file
def update_selected(self, linenum):
    self.parents = _get_parents(self.funcs, linenum)
    update_selected_cb(self.parents, self.method_cb)
    self.parents = _get_parents(self.classes, linenum)
    update_selected_cb(self.parents, self.class_cb)
Updates the dropdowns to reflect the current class and function.
def register(self, service, provider, singleton=False):
    def get_singleton(*args, **kwargs):
        result = self._get_singleton(service)
        if not result:
            instantiator = self._get_instantiator(provider)
            result = instantiator(*args, **kwargs)
            self._set_singleton(service, result)
        return result

    if not callable(provider):
        self._set_provider(service, lambda *args, **kwargs: provider)
    elif singleton:
        self._set_provider(service, get_singleton)
    else:
        self._set_provider(service, self._get_instantiator(provider))
Registers a service provider for a given service. @param service A key that identifies the service being registered. @param provider This is either the service being registered, or a callable that will either instantiate it or return it. @param singleton Indicates that the service is to be registered as a singleton. This is only relevant if the provider is a callable. Services that are not callable will always be registered as singletons.
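A sketch of the three registration modes; the container instance and `Database` class are hypothetical:

class Database(object):
    pass

container.register('config', {'debug': True})       # non-callable: always a singleton
container.register('db', Database, singleton=True)  # one shared Database instance
container.register('db_factory', Database)          # a new Database per resolution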
def find_killers(self, var_def, simplified_graph=True):
    if simplified_graph:
        graph = self.simplified_data_graph
    else:
        graph = self.data_graph
    if var_def not in graph:
        return []
    killers = []
    out_edges = graph.out_edges(var_def, data=True)
    for _, dst, data in out_edges:
        if 'type' in data and data['type'] == 'kill':
            killers.append(dst)
    return killers
Find all killers to the specified variable definition. :param ProgramVariable var_def: The variable definition. :param bool simplified_graph: True if we want to search in the simplified graph, False otherwise. :return: A collection of all killers to the specified variable definition. :rtype: list
def yaml_load(stream):
    global _HAS_YAML_LIBRARY
    if _HAS_YAML_LIBRARY is None:
        _HAS_YAML_LIBRARY = hasattr(yaml, 'CSafeLoader')
        if not _HAS_YAML_LIBRARY:
            logger.warning('libyaml was not found! Please install libyaml to'
                           ' speed up loading the model files.')
    if _HAS_YAML_LIBRARY:
        loader = yaml.CSafeLoader(stream)
    else:
        loader = yaml.SafeLoader(stream)
    loader.add_constructor('tag:yaml.org,2002:float', float_constructor)
    return loader.get_data()
Load YAML file using safe loader.
def get_config():
    global token
    config = configparser.ConfigParser()
    config.read(os.path.join(os.path.expanduser('~'), '.config/scdl/scdl.cfg'))
    try:
        token = config['scdl']['auth_token']
        path = config['scdl']['path']
    except:
        logger.error('Are you sure scdl.cfg is in $HOME/.config/scdl/ ?')
        logger.error('Are both "auth_token" and "path" defined there?')
        sys.exit()
    if os.path.exists(path):
        os.chdir(path)
    else:
        logger.error('Invalid path in scdl.cfg...')
        sys.exit()
Reads the auth token and music download filepath from scdl.cfg
def _ldp_id_adjustlen(pkt, x):
    f, v = pkt.getfield_and_val('id')
    return len(_LLDPidField.i2m(f, pkt, v)) + 1
Return the length of the `id` field, according to its real encoded type
def static_get_type_attr(t, name):
    for type_ in t.mro():
        try:
            return vars(type_)[name]
        except KeyError:
            pass
    raise AttributeError(name)
Get a type attribute statically, circumventing the descriptor protocol.
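A quick sketch of what circumventing the descriptor protocol buys: normal class attribute access fires `__get__` (so a classmethod comes back as a bound method), while this helper returns the raw entry from the defining class's `__dict__`:

class Widget(object):
    @classmethod
    def make(cls):
        return cls()

print(type(Widget.make))                           # <class 'method'>: descriptor fired
print(type(static_get_type_attr(Widget, 'make')))  # <class 'classmethod'>: raw object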
def tconvert(gpsordate='now'):
    try:
        float(gpsordate)
    except (TypeError, ValueError):
        return to_gps(gpsordate)
    return from_gps(gpsordate)
Convert GPS times to ISO-format date-times and vice-versa. Parameters ---------- gpsordate : `float`, `astropy.time.Time`, `datetime.datetime`, ... input gps or date to convert, many input types are supported Returns ------- date : `datetime.datetime` or `LIGOTimeGPS` converted gps or date Notes ----- If the input object is a `float` or `LIGOTimeGPS`, it will get converted from GPS format into a `datetime.datetime`, otherwise the input will be converted into `LIGOTimeGPS`. Examples -------- Integers and floats are automatically converted from GPS to `datetime.datetime`: >>> from gwpy.time import tconvert >>> tconvert(0) datetime.datetime(1980, 1, 6, 0, 0) >>> tconvert(1126259462.3910) datetime.datetime(2015, 9, 14, 9, 50, 45, 391000) while strings are automatically converted to `~gwpy.time.LIGOTimeGPS`: >>> to_gps('Sep 14 2015 09:50:45.391') LIGOTimeGPS(1126259462, 391000000) Additionally, a few special-case words are supported, which all return `~gwpy.time.LIGOTimeGPS`: >>> tconvert('now') >>> tconvert('today') >>> tconvert('tomorrow') >>> tconvert('yesterday')
def netloc(self):
    url = self._tuple
    if url.username and url.password:
        netloc = '%s:%s@%s' % (url.username, url.password, url.host)
    elif url.username and not url.password:
        netloc = '%s@%s' % (url.username, url.host)
    else:
        netloc = url.host
    if url.port:
        netloc = '%s:%s' % (netloc, url.port)
    return netloc
Return the netloc
def redistribute(self, **kwargs):
    source = kwargs.pop('source')
    afi = kwargs.pop('afi', 'ipv4')
    callback = kwargs.pop('callback', self._callback)
    if afi not in ['ipv4', 'ipv6']:
        raise AttributeError('Invalid AFI.')
    args = dict(rbridge_id=kwargs.pop('rbridge_id', '1'),
                afi=afi, source=source)
    redistribute = self._redistribute_builder(afi=afi, source=source)
    config = redistribute(**args)
    if kwargs.pop('get', False):
        return callback(config, handler='get_config')
    if kwargs.pop('delete', False):
        tag = 'redistribute-%s' % source
        config.find('.//*%s' % tag).set('operation', 'delete')
    return callback(config)
Set BGP redistribute properties. Args: vrf (str): The VRF for this BGP process. rbridge_id (str): The rbridge ID of the device on which BGP will be configured in a VCS fabric. source (str): Source for redistributing. (connected) afi (str): Address family to configure. (ipv4, ipv6) get (bool): Get config instead of editing config. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `source` is not specified. Examples: >>> import pynos.device >>> conn = ('10.24.39.203', '22') >>> auth = ('admin', 'password') >>> with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.bgp.redistribute(source='connected', ... rbridge_id='225') ... output = dev.bgp.redistribute(source='connected', ... rbridge_id='225', get=True) ... output = dev.bgp.redistribute(source='connected', ... rbridge_id='225', delete=True) ... dev.bgp.redistribute() # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): KeyError ... dev.bgp.redistribute(source='connected', rbridge_id='225', ... afi='hodor') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): AttributeError ... dev.bgp.redistribute(source='hodor', rbridge_id='225', ... afi='ipv4') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): AttributeError
def full_like(other, fill_value, dtype: Union[str, np.dtype, None] = None):
    from .dataarray import DataArray
    from .dataset import Dataset
    from .variable import Variable

    if isinstance(other, Dataset):
        data_vars = OrderedDict(
            (k, _full_like_variable(v, fill_value, dtype))
            for k, v in other.data_vars.items())
        return Dataset(data_vars, coords=other.coords, attrs=other.attrs)
    elif isinstance(other, DataArray):
        return DataArray(
            _full_like_variable(other.variable, fill_value, dtype),
            dims=other.dims, coords=other.coords,
            attrs=other.attrs, name=other.name)
    elif isinstance(other, Variable):
        return _full_like_variable(other, fill_value, dtype)
    else:
        raise TypeError("Expected DataArray, Dataset, or Variable")
Return a new object with the same shape and type as a given object. Parameters ---------- other : DataArray, Dataset, or Variable The reference object in input fill_value : scalar Value to fill the new object with before returning it. dtype : dtype, optional dtype of the new array. If omitted, it defaults to other.dtype. Returns ------- out : same as object New object with the same shape and type as other, with the data filled with fill_value. Coords will be copied from other. If other is based on dask, the new one will be as well, and will be split in the same chunks.
def retrieve(self, request, project, pk=None):
    job_id, bug_id = map(int, pk.split("-"))
    job = Job.objects.get(repository__name=project, id=job_id)
    try:
        bug_job_map = BugJobMap.objects.get(job=job, bug_id=bug_id)
        serializer = BugJobMapSerializer(bug_job_map)
        return Response(serializer.data)
    except BugJobMap.DoesNotExist:
        return Response("Object not found", status=HTTP_404_NOT_FOUND)
Retrieve a bug-job-map entry. pk is a composite key in the form job_id-bug_id
def reload_exports():
    ret = {}
    command = 'exportfs -r'
    output = __salt__['cmd.run_all'](command)
    ret['stdout'] = output['stdout']
    ret['stderr'] = output['stderr']
    ret['result'] = output['stderr'] == ''
    return ret
Trigger a reload of the exports file to apply changes CLI Example: .. code-block:: bash salt '*' nfs3.reload_exports
def cmd_xor(k, i, o):
    o.write(xor(i.read(), k.encode()))
XOR cipher. Note: XOR is not a 'secure cipher'. If you need strong crypto you must use algorithms like AES. You can use habu.fernet for that. Example: \b $ habu.xor -k mysecretkey -i /bin/ls > xored $ habu.xor -k mysecretkey -i xored > uxored $ sha1sum /bin/ls uxored $ 6fcf930fcee1395a1c95f87dd38413e02deff4bb /bin/ls $ 6fcf930fcee1395a1c95f87dd38413e02deff4bb uxored
def delete(name):
    with Session() as session:
        try:
            session.VFolder(name).delete()
            print_done('Deleted.')
        except Exception as e:
            print_error(e)
            sys.exit(1)
Delete the given virtual folder. This operation is irreversible! NAME: Name of a virtual folder.
def catch_lane_change(self):
    if self.current.lane_name:
        if self.current.old_lane and self.current.lane_name != self.current.old_lane:
            if (self.current.lane_id not in self.current.pool or
                    self.current.pool[self.current.lane_id] != self.current.user_id):
                self.current.log.info("LANE CHANGE : %s >> %s" % (self.current.old_lane,
                                                                  self.current.lane_name))
                if self.current.lane_auto_sendoff:
                    self.current.sendoff_current_user()
                self.current.flow_enabled = False
                if self.current.lane_auto_invite:
                    self.current.invite_other_parties(self._get_possible_lane_owners())
                return True
Trigger a lane_user_change signal if we switched to a new lane and the new lane's user is different from the current one
def update_score(self):
    if not self.subject_token:
        return
    vote_score = 0
    replies_score = 0
    for msg in self.message_set.all():
        replies_score += self._get_score(300, msg.received_time)
        for vote in msg.vote_set.all():
            vote_score += self._get_score(100, vote.created)
    page_view_score = self.hits * 10
    self.score = (page_view_score + vote_score + replies_score) // 10
    self.save()
Update the relevance score for this thread. The score is calculated with the following variables: * vote_weight: 100 - (minus) 1 for each 3 days since voted with minimum of 5. * replies_weight: 300 - (minus) 1 for each 3 days since replied with minimum of 5. * page_view_weight: 10. * vote_score: sum(vote_weight) * replies_score: sum(replies_weight) * page_view_score: sum(page_view_weight) * score = (vote_score + replies_score + page_view_score) // 10 with minimum of 0 and maximum of 5000
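A worked sketch of the decay described above, assuming `_get_score` follows the stated rule (base weight minus one point per 3 days of age, floored at 5); the actual helper is not shown in the source:

from datetime import datetime, timedelta

def _get_score(base, created):
    # Assumed decay: lose 1 point per 3 days of age, floor at 5.
    days = (datetime.now() - created).days
    return max(base - days // 3, 5)

print(_get_score(100, datetime.now() - timedelta(days=30)))   # 100 - 10 = 90
print(_get_score(300, datetime.now() - timedelta(days=900)))  # floored at 5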
def get_changeform_initial_data(self, request):
    initial = super(PageAdmin, self).get_changeform_initial_data(request)
    if 'translation_of' in request.GET:
        original = self.model._tree_manager.get(
            pk=request.GET.get('translation_of'))
        initial['layout'] = original.layout
        initial['theme'] = original.theme
        initial['color_scheme'] = original.color_scheme
        old_lang = translation.get_language()
        translation.activate(request.GET.get('language'))
        title = _(original.title)
        if title != original.title:
            initial['title'] = title
            initial['slug'] = slugify(title)
        translation.activate(old_lang)
    return initial
Copy initial data from the original page when creating a translation
def _fromGUI(self, value):
    if value == '':
        if not self.IsNoneAllowed():
            return 0
        else:
            return
    else:
        try:
            return int(value)
        except ValueError:
            if self.IsLongAllowed():
                try:
                    return long(value)
                except ValueError:
                    wx.TextCtrl.SetValue(self, "0")
                    return 0
            else:
                raise
Conversion function used in getting the value of the control.
def touch_model(self, model, **data):
    instance, created = model.objects.get_or_create(**data)
    if not created:
        if instance.updated < self.import_start_datetime:
            instance.save()
    return (instance, created)
This method creates or looks up a model instance with the given data. If the instance already exists and was last saved before this import started, it is re-saved to refresh its updated field
def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation='linear'):
    self._check_percentile(q)
    data = self._get_numeric_data() if numeric_only else self
    axis = self._get_axis_number(axis)
    is_transposed = axis == 1
    if is_transposed:
        data = data.T
    result = data._data.quantile(qs=q, axis=1,
                                 interpolation=interpolation,
                                 transposed=is_transposed)
    if result.ndim == 2:
        result = self._constructor(result)
    else:
        result = self._constructor_sliced(result, name=q)
    if is_transposed:
        result = result.T
    return result
Return values at the given quantile over requested axis. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Value between 0 <= q <= 1, the quantile(s) to compute. axis : {0, 1, 'index', 'columns'} (default 0) Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise. numeric_only : bool, default True If False, the quantile of datetime and timedelta data will be computed as well. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. .. versionadded:: 0.18.0 Returns ------- Series or DataFrame If ``q`` is an array, a DataFrame will be returned where the index is ``q``, the columns are the columns of self, and the values are the quantiles. If ``q`` is a float, a Series will be returned where the index is the columns of self and the values are the quantiles. See Also -------- core.window.Rolling.quantile: Rolling quantile. numpy.percentile: Numpy function to compute the percentile. Examples -------- >>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]), ... columns=['a', 'b']) >>> df.quantile(.1) a 1.3 b 3.7 Name: 0.1, dtype: float64 >>> df.quantile([.1, .5]) a b 0.1 1.3 3.7 0.5 2.5 55.0 Specifying `numeric_only=False` will also compute the quantile of datetime and timedelta data. >>> df = pd.DataFrame({'A': [1, 2], ... 'B': [pd.Timestamp('2010'), ... pd.Timestamp('2011')], ... 'C': [pd.Timedelta('1 days'), ... pd.Timedelta('2 days')]}) >>> df.quantile(0.5, numeric_only=False) A 1.5 B 2010-07-02 12:00:00 C 1 days 12:00:00 Name: 0.5, dtype: object
def get_first_molecule(self):
    title, coordinates = self._first
    molecule = Molecule(self.numbers, coordinates, title, symbols=self.symbols)
    return molecule
Get the first molecule from the trajectory This can be useful to configure your program before handling the actual trajectory.
def _new_point(self, loglstar, logvol):
    ncall, nupdate = 0, 0
    while True:
        u, v, logl, nc, blob = self._get_point_value(loglstar)
        ncall += nc
        ucheck = ncall >= self.update_interval * (1 + nupdate)
        bcheck = self._beyond_unit_bound(loglstar)
        if blob is not None and self.nqueue <= 0 and bcheck:
            self.update_proposal(blob)
        if logl >= loglstar:
            break
        if ucheck and bcheck:
            pointvol = math.exp(logvol) / self.nlive
            bound = self.update(pointvol)
            if self.save_bounds:
                self.bound.append(bound)
            self.nbound += 1
            nupdate += 1
            self.since_update = -ncall
    return u, v, logl, ncall
Propose points until a new point that satisfies the log-likelihood constraint `loglstar` is found.
def get(self, key, **kwargs):
    return self._get('/'.join([self._endpoint, key]), payload=kwargs)
Fetch value at the given key kwargs can hold `recurse`, `wait` and `index` params
def get_regex(regex):
    if isinstance(regex, basestring):
        return re.compile(regex)
    elif not isinstance(regex, re._pattern_type):
        raise TypeError("Invalid regex type: %r" % (regex,))
    return regex
Ensure we have a compiled regular expression object. >>> import re >>> get_regex('string') # doctest: +ELLIPSIS <_sre.SRE_Pattern object at 0x...> >>> pattern = re.compile(r'string') >>> get_regex(pattern) is pattern True >>> get_regex(3) # doctest: +ELLIPSIS Traceback (most recent call last): ... TypeError: Invalid regex type: 3
def copy_to_clipboard(self, url):
    if url is None:
        self.term.flash()
        return
    try:
        clipboard_copy(url)
    except (ProgramError, OSError) as e:
        _logger.exception(e)
        self.term.show_notification(
            'Failed to copy url: {0}'.format(e))
    else:
        self.term.show_notification(
            ['Copied to clipboard:', url], timeout=1)
Attempt to copy the selected URL to the user's clipboard
def get_joining_group_property(value, is_bytes=False):
    obj = unidata.ascii_joining_group if is_bytes else unidata.unicode_joining_group
    if value.startswith('^'):
        negated = value[1:]
        value = '^' + unidata.unicode_alias['joininggroup'].get(negated, negated)
    else:
        value = unidata.unicode_alias['joininggroup'].get(value, value)
    return obj[value]
Get `JOINING GROUP` property.
def _validate_iterable(iterable_type, value):
    if isinstance(value, six.string_types):
        msg = "Invalid iterable of type(%s): %s"
        raise ValidationError(msg % (type(value), value))
    try:
        return iterable_type(value)
    except TypeError:
        raise ValidationError("Invalid iterable: %s" % (value))
Convert the iterable to iterable_type, or raise a Configuration exception.