def preview(self, page, filelike, format='png', dpi=72, background_colour=0xFFFFFF):
    """Render a preview image of a page.

    Parameters
    ----------
    page: positive integer
        Which page to render. Must be in the range [1, page_count].
    filelike: path or file-like object
        Can be a filename as a string, a Python file object, or something
        which behaves like a Python file object. For example, if you were
        using the Django web framework, an HttpResponse object could be
        passed to render the preview to the browser (as long as you
        remember to set the mimetype of the response). If you pass a
        filename, the existing contents will be overwritten.
    format: string
        The image format to use for the preview. ReportLab uses the Python
        Imaging Library (PIL) internally, so any PIL format should be
        supported.
    dpi: positive real
        The dots-per-inch to use when rendering.
    background_colour: Hex colour specification
        What colour background to use.

    Notes
    -----
    If you are creating this sheet for a preview only, you can pass the
    pages_to_draw parameter to the constructor to avoid the drawing
    function being called for all the labels on pages you'll never look at.
    If you preview a page you did not tell the sheet to draw, you will get
    a blank image.

    Raises
    ------
    ValueError:
        If the page number is not valid.

    """
    # Check the page number.
    if page < 1 or page > self.page_count:
        raise ValueError("Invalid page number; should be between 1 and {0:d}.".format(self.page_count))

    # Shade any remaining missing labels if desired.
    self._shade_remaining_missing()

    # Rendering to an image (as opposed to a PDF) requires any background
    # to have an integer width and height if it is a ReportLab Image
    # object. Drawing objects are exempt from this.
    oldw, oldh = None, None
    if isinstance(self._bgimage, Image):
        oldw, oldh = self._bgimage.width, self._bgimage.height
        self._bgimage.width = int(oldw) + 1
        self._bgimage.height = int(oldh) + 1

    # Let ReportLab do the heavy lifting.
    renderPM.drawToFile(self._pages[page - 1], filelike, format, dpi, background_colour)

    # Restore the size of the background image if we changed it.
    if oldw:
        self._bgimage.width = oldw
        self._bgimage.height = oldh
def get_hardware_info(self):
    """
    Returns the extended hardware information of a device. With
    multi-channel USB-CANmoduls the information for both CAN channels is
    returned separately.

    :return:
        Tuple with extended hardware information structure (see structure
        :class:`HardwareInfoEx`) and structures with information of CAN
        channel 0 and 1 (see structure :class:`ChannelInfo`).
    :rtype: tuple(HardwareInfoEx, ChannelInfo, ChannelInfo)
    """
    hw_info_ex = HardwareInfoEx()
    can_info_ch0, can_info_ch1 = ChannelInfo(), ChannelInfo()
    UcanGetHardwareInfoEx2(self._handle, byref(hw_info_ex),
                           byref(can_info_ch0), byref(can_info_ch1))
    return hw_info_ex, can_info_ch0, can_info_ch1
def _mark_html_fields_as_safe(self, page):
    """
    Mark the HTML content as safe so we don't have to use the safe
    template tag in all CMS templates.
    """
    page.title = mark_safe(page.title)
    page.content = mark_safe(page.content)
    return page
def get_perm_by_code(perm_code, **kwargs):
    """
    Get a permission by its code
    """
    try:
        perm = db.DBSession.query(Perm).filter(Perm.code == perm_code).one()
        return perm
    except NoResultFound:
        raise ResourceNotFoundError("Permission not found (perm_code={})".format(perm_code))
def validate_current_password(self, value):
    """
    Check that the supplied current password is correct.
    """
    if self.instance and self.instance.has_usable_password() and not self.instance.check_password(value):
        raise serializers.ValidationError(_('Current password is not correct'))
    return value
def get_historical_info(self, symbol, items=None, startDate=None, endDate=None, limit=None):
    """get_historical_info() uses the csv datatable to retrieve all
    available historical data on a typical historical prices page
    """
    startDate, endDate = self.__get_time_range(startDate, endDate)
    response = self.select('yahoo.finance.historicaldata', items, limit).where(
        ['symbol', '=', symbol],
        ['startDate', '=', startDate],
        ['endDate', '=', endDate])
    return response
def load_module(filename):
    """
    Loads a module by filename
    """
    basename = os.path.basename(filename)
    path = os.path.dirname(filename)
    sys.path.append(path)
    # TODO(tlan) need to figure out how to handle errors thrown here
    return __import__(os.path.splitext(basename)[0])
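A brief usage sketch of the loader above; the plugin path is hypothetical, and the module name is derived from the file's base name:

mod = load_module('/opt/plugins/my_plugin.py')
print(mod.__name__)  # -> "my_plugin"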
def silent(cmd, **kwargs):
    """Calls the given shell command. Output will not be displayed. Returns
    the status code.

    **Examples**:
    ::
        auxly.shell.silent("ls")
    """
    return call(cmd, shell=True, stdout=NULL, stderr=NULL, **kwargs)
def load_qrandom():
    """
    Loads a set of 10000 random numbers generated by qrandom.

    This dataset can be used when you want to do some limited tests with
    "true" random data without an internet connection.

    Returns:
        int array: the dataset
    """
    fname = "datasets/qrandom.npy"
    with pkg_resources.resource_stream(__name__, fname) as f:
        return np.load(f)
def get_location(self, project_path, source, position, filename):
    """Return the line number and file path where the name under the
    cursor is defined.

    If the line is None, the location wasn't found. If the file path is
    None, the definition is located in the same source.

    :param project_path: absolute project path
    :param source: unicode or byte string code source
    :param position: character or byte cursor position
    :param filename: absolute path of file with source code
    :returns: tuple (lineno, file path)
    """
    return self._call('get_location', project_path, source, position, filename)
def to_subject_paths(paths):
    '''
    to_subject_paths(paths) accepts either a string that is a :-separated
    list of directories or a list of directories and yields a list of all
    the existing directories.
    '''
    if paths is None:
        return []
    if pimms.is_str(paths):
        paths = paths.split(':')
    paths = [os.path.expanduser(p) for p in paths]
    return [p for p in paths if os.path.isdir(p)]
def extendedEuclid(a, b):
    """return a tuple of three values: x, y and z, such that x is the GCD
    of a and b, and x = y * a + z * b"""
    if a == 0:
        return b, 0, 1
    else:
        g, y, x = extendedEuclid(b % a, a)
        return g, x - (b // a) * y, y
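A quick sanity check of the identity promised by the docstring (the inputs are arbitrary):

g, y, z = extendedEuclid(240, 46)
assert g == 2                   # gcd(240, 46)
assert y * 240 + z * 46 == g    # the Bezout identity holds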
def load_global_config(config_path):
    """Load a global configuration object, and query for any required
    variables along the way"""
    config = configparser.RawConfigParser()
    if os.path.exists(config_path):
        logger.debug("Checking and setting global parameters...")
        config.read(config_path)
    else:
        _initial_run()
        logger.info("Unable to find a global sprinter configuration!")
        logger.info("Creating one now. Please answer some questions" +
                    " about what you would like sprinter to do.")
        logger.info("")
    # checks and sets sections
    if not config.has_section('global'):
        config.add_section('global')
    configure_config(config)
    write_config(config, config_path)
    return config
def _get_caller_globals_and_locals():
    """
    Returns the globals and locals of the calling frame.

    Is there an alternative to frame hacking here?
    """
    caller_frame = inspect.stack()[2]
    myglobals = caller_frame[0].f_globals
    mylocals = caller_frame[0].f_locals
    return myglobals, mylocals
def word_list_to_long(val_list, big_endian=True):
    """Convert a word list (16 bits int) to a long list (32 bits int).

    By default word_list_to_long() uses big endian order. To use little
    endian, set the big_endian param to False.

    :param val_list: list of 16 bits int value
    :type val_list: list
    :param big_endian: True for big endian/False for little (optional)
    :type big_endian: bool
    :returns: list of 32 bits int value
    :rtype: list
    """
    # allocate list for long int
    long_list = [None] * int(len(val_list) / 2)
    # fill registers list with register items
    for i, item in enumerate(long_list):
        if big_endian:
            long_list[i] = (val_list[i * 2] << 16) + val_list[(i * 2) + 1]
        else:
            long_list[i] = (val_list[(i * 2) + 1] << 16) + val_list[i * 2]
    # return long list
    return long_list
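A small demonstration of both byte orders (values arbitrary):

words = [0x1234, 0x5678]
assert word_list_to_long(words) == [0x12345678]                    # big endian
assert word_list_to_long(words, big_endian=False) == [0x56781234]  # little endian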
def GetElevation(self, latitude, longitude, timeout=0):
    '''Returns the altitude (m ASL) of a given lat/long pair, or None if
    unknown'''
    if latitude is None or longitude is None:
        return None
    if self.database == 'srtm':
        TileID = (numpy.floor(latitude), numpy.floor(longitude))
        if TileID in self.tileDict:
            alt = self.tileDict[TileID].getAltitudeFromLatLon(latitude, longitude)
        else:
            tile = self.downloader.getTile(numpy.floor(latitude), numpy.floor(longitude))
            if tile == 0:
                if timeout > 0:
                    t0 = time.time()
                    while time.time() < t0 + timeout and tile == 0:
                        tile = self.downloader.getTile(numpy.floor(latitude), numpy.floor(longitude))
                        if tile == 0:
                            time.sleep(0.1)
            if tile == 0:
                return None
            self.tileDict[TileID] = tile
            alt = tile.getAltitudeFromLatLon(latitude, longitude)
    if self.database == 'geoscience':
        alt = self.mappy.getAltitudeAtPoint(latitude, longitude)
    return alt
def factors(self):
    """
    Access the factors

    :returns: twilio.rest.authy.v1.service.entity.factor.FactorList
    :rtype: twilio.rest.authy.v1.service.entity.factor.FactorList
    """
    if self._factors is None:
        self._factors = FactorList(
            self._version,
            service_sid=self._solution['service_sid'],
            identity=self._solution['identity'],
        )
    return self._factors
def log_progress(sequence, every=None, size=None, name='Items'):
    """Taken from https://github.com/alexanderkuk/log-progress"""
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display

    is_iterator = False
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = int(size / 200)  # every 0.5%
    else:
        assert every is not None, 'sequence is iterator, set every'

    if is_iterator:
        progress = IntProgress(min=0, max=1, value=1)
        progress.bar_style = 'info'
    else:
        progress = IntProgress(min=0, max=size, value=0)
    label = HTML()
    box = VBox(children=[label, progress])
    display(box)

    index = 0
    try:
        for index, record in enumerate(sequence, 1):
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = '{name}: {index} / ?'.format(
                        name=name,
                        index=index
                    )
                else:
                    progress.value = index
                    label.value = u'{name}: {index} / {size}'.format(
                        name=name,
                        index=index,
                        size=size
                    )
            yield record
    except Exception as e:
        progress.bar_style = 'danger'
        raise
    else:
        progress.bar_style = 'success'
        progress.value = index
        label.value = "{name}: {index}".format(
            name=name,
            index=str(index or '?')
        )
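Typical notebook usage of the generator above (requires ipywidgets inside a Jupyter environment):

for row in log_progress(range(1000), name='Rows'):
    pass  # process each row; the widget updates roughly every 0.5%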
def quantile(self, q, dim=None, interpolation='linear'):
    """Compute the qth quantile of the data along the specified dimension.

    Returns the qth quantile(s) of the array elements.

    Parameters
    ----------
    q : float in range of [0,1] (or sequence of floats)
        Quantile to compute, which must be between 0 and 1 inclusive.
    dim : str or sequence of str, optional
        Dimension(s) over which to apply quantile.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        This optional parameter specifies the interpolation method to use
        when the desired quantile lies between two data points ``i < j``:

            * linear: ``i + (j - i) * fraction``, where ``fraction`` is
              the fractional part of the index surrounded by ``i`` and
              ``j``.
            * lower: ``i``.
            * higher: ``j``.
            * nearest: ``i`` or ``j``, whichever is nearest.
            * midpoint: ``(i + j) / 2``.

    Returns
    -------
    quantiles : Variable
        If `q` is a single quantile, then the result is a scalar. If
        multiple percentiles are given, first axis of the result
        corresponds to the quantile and a quantile dimension is added to
        the return array. The other dimensions are the dimensions that
        remain after the reduction of the array.

    See Also
    --------
    numpy.nanpercentile, pandas.Series.quantile, Dataset.quantile,
    DataArray.quantile
    """
    if isinstance(self.data, dask_array_type):
        raise TypeError("quantile does not work for arrays stored as dask "
                        "arrays. Load the data via .compute() or .load() "
                        "prior to calling this method.")

    q = np.asarray(q, dtype=np.float64)

    new_dims = list(self.dims)
    if dim is not None:
        axis = self.get_axis_num(dim)
        if utils.is_scalar(dim):
            new_dims.remove(dim)
        else:
            for d in dim:
                new_dims.remove(d)
    else:
        axis = None
        new_dims = []

    # only add the quantile dimension if q is array like
    if q.ndim != 0:
        new_dims = ['quantile'] + new_dims

    qs = np.nanpercentile(self.data, q * 100., axis=axis,
                          interpolation=interpolation)
    return Variable(new_dims, qs)
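This mirrors xarray's Variable.quantile; a short usage sketch with arbitrary data:

import numpy as np
import xarray as xr

var = xr.Variable(('x',), np.arange(10.0))
var.quantile(0.5)                    # scalar median
var.quantile([0.25, 0.75], dim='x')  # result gains a 'quantile' dimension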
def authentication(self, username, password):
    """Configures the user authentication for eAPI.

    This method configures the username and password combination to use
    for authenticating to eAPI.

    Args:
        username (str): The username to use to authenticate the eAPI
            connection with
        password (str): The password in clear text to use to authenticate
            the eAPI connection with
    """
    _auth_text = '{}:{}'.format(username, password)

    # Work around for Python 2.7/3.x compatibility
    if int(sys.version[0]) > 2:
        # For Python 3.x
        _auth_bin = base64.encodebytes(_auth_text.encode())
        _auth = _auth_bin.decode()
        _auth = _auth.replace('\n', '')
        self._auth = _auth
    else:
        # For Python 2.7
        _auth = base64.encodestring(_auth_text)
        self._auth = str(_auth).replace('\n', '')

    _LOGGER.debug('Authentication string is: {}:***'.format(username))
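A standalone check of the Python 3 branch's encoding (credentials hypothetical):

import base64

auth = base64.encodebytes(b'admin:secret').decode().replace('\n', '')
assert auth == 'YWRtaW46c2VjcmV0'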
def upsert(self, document, cond):
    """
    Update a document, if it exists - insert it otherwise.

    Note: this will update *all* documents matching the query.

    :param document: the document to insert or the fields to update
    :param cond: which document to look for
    :returns: a list containing the updated document's ID
    """
    updated_docs = self.update(document, cond)

    if updated_docs:
        return updated_docs
    else:
        return [self.insert(document)]
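This matches TinyDB's table API; a hedged usage sketch (database file and field names hypothetical):

from tinydb import TinyDB, Query

db = TinyDB('example.json')
User = Query()
# Updates the matching record if present, inserts it otherwise.
db.upsert({'name': 'john', 'logged_in': True}, User.name == 'john')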
def getServiceNamesToTraceIds(self, time_stamp, service_name, rpc_name):
    """
    Given a time stamp, server service name, and rpc name, fetch all of
    the client services calling in paired with the lists of every trace
    Ids (list<i64>) from the server to client.

    The three arguments specify epoch time in microseconds, server side
    service name and rpc name. The return maps contains the key -
    client_service_name and value - list<trace_id>.

    Parameters:
     - time_stamp
     - service_name
     - rpc_name
    """
    self.send_getServiceNamesToTraceIds(time_stamp, service_name, rpc_name)
    return self.recv_getServiceNamesToTraceIds()
def add_label(self, name, color):
    """Add a new label. Its id will automatically be calculated."""
    color_upper = color.upper()
    if not self._color_re.match(color_upper):
        raise ValueError('Invalid color: {}'.format(color))
    labels_tag = self.root[0]
    last_id = int(labels_tag[-1].get('id'))
    new_id = str(last_id + 1)
    new_label = etree.Element('label', id=new_id, color=color_upper)
    new_label.text = name
    labels_tag.append(new_label)
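A standalone sketch of the same id-allocation pattern with lxml (the XML layout is assumed):

from lxml import etree

root = etree.fromstring(
    '<doc><labels><label id="1" color="#FF0000">bug</label></labels></doc>')
labels_tag = root[0]
new_id = str(int(labels_tag[-1].get('id')) + 1)  # one past the last label's id
new_label = etree.Element('label', id=new_id, color='#00FF00')
new_label.text = 'feature'
labels_tag.append(new_label)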
def cut(self, name, disconnect=False):
    """
    Cut a wire (undo a wire() call)

    Arguments:
        - name (str): name of the wire

    Keyword Arguments:
        - disconnect (bool): if True also disconnect all connections on
          the specified wire
    """
    wire = getattr(self, name, None)
    if wire and isinstance(wire, Wire):
        if name != "main":
            delattr(self, name)
        if disconnect:
            wire.disconnect()
        wire.off("receive", self.on_receive)
        if self.main == wire:
            self.main = None
            self.set_main_wire()
def proxy_servers(self):
    """
    Return the proxy servers available.

    Environment variables are searched first, then the result is updated
    with values from the condarc config file.
    """
    proxy_servers = {}
    if self._load_rc_func is None:
        return proxy_servers
    else:
        HTTP_PROXY = os.environ.get('HTTP_PROXY')
        HTTPS_PROXY = os.environ.get('HTTPS_PROXY')
        if HTTP_PROXY:
            proxy_servers['http'] = HTTP_PROXY
        if HTTPS_PROXY:
            proxy_servers['https'] = HTTPS_PROXY
        proxy_servers_conf = self._load_rc_func().get('proxy_servers', {})
        proxy_servers.update(proxy_servers_conf)
        return proxy_servers
def gantry_axes(cls) -> Tuple['Axis', 'Axis', 'Axis', 'Axis']:
    """
    The axes which are tied to the gantry and require the deck
    calibration transform
    """
    return (cls.X, cls.Y, cls.Z, cls.A)
def to_dict(value, prefix=None, separators="=,"):
    """
    Args:
        value: Value to turn into a dict
        prefix (str | unicode | None): Optional prefix for keys (if
            provided, `prefix.` is added to all keys)
        separators (str | unicode): 2 chars: 1st is assignment separator,
            2nd is key-value pair separator

    Returns:
        (dict): Parsed key/values
    """
    if not value or isinstance(value, dict):
        return value or {}
    result = {}
    for val in flattened(value, split=(separators[1], SANITIZED)):
        if not val:
            continue
        if hasattr(val, "partition"):
            k, _, v = val.partition(separators[0])
            k = k.strip()
            if k:
                v = v.strip()
                if prefix and not k.startswith(prefix):
                    k = "%s.%s" % (prefix, k)
                result[k] = v
    return result
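A standalone sketch of the core key/value split used above, without the flattened() helper (input string hypothetical):

result = {}
for pair in "env=prod,region=us-east-1".split(","):
    k, _, v = pair.partition("=")
    if k.strip():
        result[k.strip()] = v.strip()
assert result == {"env": "prod", "region": "us-east-1"}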
def add(connect_spec, dn, attributes):
    '''Add an entry to an LDAP database.

    :param connect_spec:
        See the documentation for the ``connect_spec`` parameter for
        :py:func:`connect`.

    :param dn:
        Distinguished name of the entry.

    :param attributes:
        Non-empty dict mapping each of the new entry's attributes to a
        non-empty iterable of values.

    :returns:
        ``True`` if successful, raises an exception otherwise.

    CLI example:

    .. code-block:: bash

        salt '*' ldap3.add "{
            'url': 'ldaps://ldap.example.com/',
            'bind': {
                'method': 'simple',
                'password': 'secret',
            },
        }" "dn='dc=example,dc=com'" "attributes={'example': 'values'}"
    '''
    l = connect(connect_spec)
    # convert the "iterable of values" to lists in case that's what
    # addModlist() expects (also to ensure that the caller's objects
    # are not modified)
    attributes = dict(((attr, salt.utils.data.encode(list(vals)))
                       for attr, vals in six.iteritems(attributes)))
    log.info('adding entry: dn: %s attributes: %s', repr(dn), repr(attributes))
    if 'unicodePwd' in attributes:
        attributes['unicodePwd'] = [_format_unicode_password(x)
                                    for x in attributes['unicodePwd']]
    modlist = ldap.modlist.addModlist(attributes)
    try:
        l.c.add_s(dn, modlist)
    except ldap.LDAPError as e:
        _convert_exception(e)
    return True
def regexpr_validator(value):
    """
    Test that ``value`` is a valid regular expression

    :param unicode value: A regular expression to test
    :raises ValidationError: if ``value`` is not a valid regular expression
    """
    try:
        re.compile(value)
    except re.error:
        raise ValidationError(
            _('"%(value)s" is not a valid regular expression'),
            params={'value': value}
        )
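A usage sketch of the validator above (patterns arbitrary):

regexpr_validator(r'^\d{4}-\d{2}$')  # valid pattern: returns silently
regexpr_validator('([unclosed')      # invalid: raises ValidationError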
def hash(cls, path, digest=None, hasher=sha1):
    """Return the digest of a single file in a memory-efficient manner."""
    if digest is None:
        digest = hasher()
    with open(path, 'rb') as fh:
        cls.update_hash(fh, digest)
    return digest.hexdigest()
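A self-contained version of the same chunked-hashing idea, with the read loop inlined instead of delegated to update_hash (chunk size arbitrary):

import hashlib

def file_digest(path, chunk_size=65536):
    digest = hashlib.sha1()
    with open(path, 'rb') as fh:
        # Fixed-size reads keep memory use flat even for very large files.
        for chunk in iter(lambda: fh.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()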
def download(self, streamed=False, action=None, chunk_size=1024, **kwargs):
    """Download the archive of a project export.

    Args:
        streamed (bool): If True the data will be processed by chunks of
            `chunk_size` and each chunk is passed to `action` for
            treatment
        action (callable): Callable responsible of dealing with chunk of
            data
        chunk_size (int): Size of each chunk
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabGetError: If the server failed to perform the request

    Returns:
        str: The blob content if streamed is False, None otherwise
    """
    path = '/projects/%s/export/download' % self.project_id
    result = self.manager.gitlab.http_get(path, streamed=streamed,
                                          raw=True, **kwargs)
    return utils.response_content(result, streamed, action, chunk_size)
def nl_msg_in_handler_debug(msg, arg):
    """https://github.com/thom311/libnl/blob/libnl3_2_25/lib/handlers.c#L114."""
    ofd = arg or _LOGGER.debug
    ofd('-- Debug: Received Message:')
    nl_msg_dump(msg, ofd)
    return NL_OK
def cancel_id(cls, id):
    """
    Cancels command denoted by this id

    Args:
        `id`: command id
    """
    conn = Qubole.agent()
    data = {"status": "kill"}
    return conn.put(cls.element_path(id), data)
def getSonarData(self):
    '''
    Returns last SonarData.

    @return last JdeRobotTypes SonarData saved
    '''
    if self.hasproxy():
        self.lock.acquire()
        sonar = self.sonar
        self.lock.release()
        return sonar
    return None
def set_comment(self, cellid, comment):
    """Saves the provided comment to the current dataset.

    :param cellid: number of the current cell
    :type cellid: int
    :param comment: a message to add documentation to data
    :type comment: str
    """
    info = {'cellid': cellid, 'comment': comment}
    self.datafile.set_metadata(self.current_dataset_name, info)
def geom_dict_to_array_dict(geom_dict, coord_names=['Longitude', 'Latitude']):
    """
    Converts a dictionary containing a geometry key to a dictionary of
    x- and y-coordinate arrays and, if present, a list-of-lists of hole
    arrays.
    """
    x, y = coord_names
    geom = geom_dict['geometry']
    new_dict = {k: v for k, v in geom_dict.items() if k != 'geometry'}
    array = geom_to_array(geom)
    new_dict[x] = array[:, 0]
    new_dict[y] = array[:, 1]
    if geom.geom_type == 'Polygon':
        holes = []
        for interior in geom.interiors:
            holes.append(geom_to_array(interior))
        if holes:
            new_dict['holes'] = [holes]
    elif geom.geom_type == 'MultiPolygon':
        outer_holes = []
        for g in geom:
            holes = []
            for interior in g.interiors:
                holes.append(geom_to_array(interior))
            outer_holes.append(holes)
        if any(hs for hs in outer_holes):
            new_dict['holes'] = outer_holes
    return new_dict
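A hedged usage sketch with shapely; it assumes the geom_to_array helper returns an N x 2 coordinate array, as the code above implies:

from shapely.geometry import Polygon

poly = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)],
               holes=[[(0.5, 0.5), (1.0, 0.5), (1.0, 1.0), (0.5, 1.0)]])
arrays = geom_dict_to_array_dict({'geometry': poly, 'name': 'square'})
# arrays['Longitude'] / arrays['Latitude'] hold the exterior ring;
# arrays['holes'] wraps the interior ring coordinates.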
def get_chrom_for_transcript(self, transcript_id, hgnc_id):
    """ obtain the chromosome for a transcript from Ensembl
    """
    headers = {"content-type": "application/json"}

    self.attempt = 0
    ext = "/overlap/id/{}?feature=gene".format(transcript_id)
    r = self.ensembl_request(ext, headers)

    for gene in json.loads(r):
        if gene["external_name"] == hgnc_id:
            return gene["seq_region_name"]

    return None
def from_parfiles(cls, pst, parfile_names, real_names=None):
    """ create a parameter ensemble from parfiles. Accepts parfiles with
    fewer parameters than the control file (gets NaNs in the ensemble) or
    extra parameters in the parfiles (get dropped)

    Parameters:
        pst : pyemu.Pst
        parfile_names : list of str
            par file names
        real_names : str
            optional list of realization names. If None, a single integer
            counter is used

    Returns:
        pyemu.ParameterEnsemble
    """
    if isinstance(pst, str):
        pst = pyemu.Pst(pst)
    dfs = {}
    if real_names is not None:
        assert len(real_names) == len(parfile_names)
    else:
        real_names = np.arange(len(parfile_names))

    for rname, pfile in zip(real_names, parfile_names):
        assert os.path.exists(pfile), "ParameterEnsemble.read_parfiles() error: " + \
            "file: {0} not found".format(pfile)
        df = read_parfile(pfile)
        # check for scale differences - I don't know who is dumb enough
        # to change scale between par files and pst...
        diff = df.scale - pst.parameter_data.scale
        if diff.apply(np.abs).sum() > 0.0:
            warnings.warn("differences in scale detected, applying scale in par file",
                          PyemuWarning)
            # df.loc[:,"parval1"] *= df.scale
        dfs[rname] = df.parval1.values

    df_all = pd.DataFrame(data=dfs).T
    df_all.columns = df.index

    if len(pst.par_names) != df_all.shape[1]:
        # if len(pst.par_names) < df_all.shape[1]:
        #     raise Exception("pst is not compatible with par files")
        pset = set(pst.par_names)
        dset = set(df_all.columns)
        diff = pset.difference(dset)
        if len(diff) > 0:
            warnings.warn("the following parameters are not in the par files (getting NaNs) :{0}".
                          format(','.join(diff)), PyemuWarning)
            blank_df = pd.DataFrame(index=df_all.index, columns=diff)
            df_all = pd.concat([df_all, blank_df], axis=1)

        diff = dset.difference(pset)
        if len(diff) > 0:
            warnings.warn("the following par file parameters are not in the control (being dropped):{0}".
                          format(','.join(diff)), PyemuWarning)
            df_all = df_all.loc[:, pst.par_names]

    return ParameterEnsemble.from_dataframe(df=df_all, pst=pst)
def _format(self, posts):
    """ This method is called by the get_content() method to format posts
    as dictionaries."""
    if posts.__class__ == Post:
        # format a single post
        return posts.dict()

    formated_posts = []
    for post in posts:
        formated_posts.append(post.dict())

    return formated_posts
def _set_buttons(self, chat, bot):
    """
    Helper method to set the buttons given the input sender and chat.
    """
    if isinstance(self.reply_markup, (
            types.ReplyInlineMarkup, types.ReplyKeyboardMarkup)):
        self._buttons = [[
            MessageButton(self._client, button, chat, bot, self.id)
            for button in row.buttons
        ] for row in self.reply_markup.rows]
        self._buttons_flat = [x for row in self._buttons for x in row]
def get_functions_writing_to_variable(self, variable):
    '''
    Return the functions writing to the variable
    '''
    return [f for f in self.functions if f.is_writing(variable)]
def _save_one(self, model, ctx):
    """
    Saves the created instance.
    """
    assert isinstance(ctx, ResourceQueryContext)
    self._orm.add(model)
    self._orm.flush()
def get_last_ticker(self, symbol, _async=False):
    """
    Get the latest trade detail for a symbol.

    :param symbol: the symbol to query
    :return: the trade detail response
    """
    params = {'symbol': symbol}
    url = u.MARKET_URL + '/market/trade'
    return http_get_request(url, params, _async=_async)
def getElementById(self, _id):
    '''
    getElementById - Search children of this tag for a tag containing an id

    @param _id - String of id

    @return - AdvancedTag or None
    '''
    for child in self.children:
        if child.getAttribute('id') == _id:
            return child
        found = child.getElementById(_id)
        if found is not None:
            return found
    return None
def process_docopts(test=None):
    # type: (Optional[Dict[str,Any]])->None
    """
    Just process the command line options and commands

    :return:
    """
    if test:
        arguments = test
    else:
        arguments = docopt(__doc__, version="Jiggle Version {0}".format(__version__))
    logger.debug(arguments)

    file_opener = FileOpener()
    central_module_finder = CentralModuleFinder(file_opener)

    if arguments["--module"]:
        central_module = arguments["--module"]
    elif arguments["--project"]:
        # soon to be deprecated in favor of module/package
        central_module = arguments["--project"]
    else:
        # infer it the best we can.
        central_module = central_module_finder.find_central_module()

    if arguments["--init"]:
        force_init = arguments["--init"]
        if force_init == "False":
            force_init = False
        if force_init == "True":
            force_init = True
    else:
        force_init = False

    if arguments["here"]:
        # TODO: find better way to turn debugging on & off
        # console_trace(logging.DEBUG)
        module_finder = ModuleFinder(file_opener)
        guess_src_dir = module_finder.extract_package_dir()
        if not guess_src_dir:
            guess_src_dir = ""
        if not central_module:
            # check if exists first?
            central_module = "setup.py"
        bump_version(
            project=central_module, source=guess_src_dir, force_init=force_init
        )
    elif arguments["find"]:
        # Only show errors. Rest of extraneous console output messes up this:
        # jiggle_version find>version.txt
        if arguments["--project"]:
            central_module = arguments["--project"]
        find_version(project=central_module, source="", force_init=force_init)
    else:
        if arguments["--project"]:
            central_module = arguments["--project"]
        bump_version(
            project=arguments["--project"],
            source=arguments["--source"],
            force_init=force_init,
        )
def apmAggregate(self, **criteria):
    """Collect all of the match history's APM data to report the player's
    calculated MMR."""
    apms = [m.apm(self) for m in self.matchSubset(**criteria)]
    if not apms:
        return 0  # no apm information without match history
    return sum(apms) / len(apms)
def getOrderVectorEGMM(self):
    """
    Returns a list of lists. Each list represents tiers of candidates.
    candidates in earlier tiers are preferred to candidates appearing in
    later tiers. Candidates in the same tier are preferred equally.
    """
    # We sort the candidates based on the number of incoming edges they
    # have in the graph. If two candidates have the same number, we assume
    # that they are tied.
    incEdgesMap = self.getIncEdgesMap()
    sortedKeys = sorted(incEdgesMap.keys())
    orderVector = []
    m = 0
    for key in sortedKeys:
        m += len(incEdgesMap[key])
    result = [0] * m
    for k in range(0, len(sortedKeys)):
        key = sortedKeys[k]
        cands = incEdgesMap[key]
        for cand in cands:
            result[cand] = len(sortedKeys) - (k + 1)
    return result
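A standalone illustration of the scoring loop above; the in-edge map is hypothetical (keys are in-edge counts, values are candidate indices):

incEdgesMap = {0: [2], 1: [0, 3], 2: [1]}
sortedKeys = sorted(incEdgesMap)
m = sum(len(incEdgesMap[key]) for key in sortedKeys)
result = [0] * m
for k, key in enumerate(sortedKeys):
    for cand in incEdgesMap[key]:
        result[cand] = len(sortedKeys) - (k + 1)
assert result == [1, 0, 2, 1]  # candidate 2, with the fewest in-edges, scores highest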
def read_namespaced_replica_set(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_replica_set  # noqa: E501

    read the specified ReplicaSet  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_replica_set(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the ReplicaSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
    :param bool export: Should this value be exported. Export strips fields that a user can not specify.
    :return: V1beta1ReplicaSet
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.read_namespaced_replica_set_with_http_info(name, namespace, **kwargs)  # noqa: E501
    else:
        (data) = self.read_namespaced_replica_set_with_http_info(name, namespace, **kwargs)  # noqa: E501
        return data
def free_source(self, name, free=True, pars=None, **kwargs):
    """Free/Fix parameters of a source.

    Parameters
    ----------
    name : str
        Source name.
    free : bool
        Choose whether to free (free=True) or fix (free=False) source
        parameters.
    pars : list
        Set a list of parameters to be freed/fixed for this source.  If
        none then all source parameters will be freed/fixed with the
        exception of those defined in the skip_pars list.
    """
    free_pars = self.get_free_param_vector()
    loglevel = kwargs.pop('loglevel', self.loglevel)

    # Find the source
    src = self.roi.get_source_by_name(name)
    name = src.name

    if pars is None or (isinstance(pars, list) and not pars):
        pars = []
        pars += norm_parameters.get(src['SpectrumType'], [])
        pars += shape_parameters.get(src['SpectrumType'], [])
    elif pars == 'norm':
        pars = []
        pars += norm_parameters.get(src['SpectrumType'], [])
    elif pars == 'shape':
        pars = []
        pars += shape_parameters.get(src['SpectrumType'], [])
    elif isinstance(pars, list):
        pass
    else:
        raise Exception('Invalid parameter list.')

    # Remove locked parameters
    lck_params = self._lck_params.get(name, [])
    pars = [p for p in pars if p not in lck_params]

    # Deduce here the names of all parameters from the spectral type
    src_par_names = pyLike.StringVector()
    self.like[name].src.spectrum().getParamNames(src_par_names)

    par_indices = []
    par_names = []
    for p in src_par_names:
        if pars is not None and p not in pars:
            continue
        idx = self.like.par_index(name, p)
        if free == free_pars[idx]:
            continue
        par_indices.append(idx)
        par_names.append(p)

    if len(par_names) == 0:
        return

    if free:
        self.logger.log(loglevel, 'Freeing parameters for %-22s: %s',
                        name, par_names)
    else:
        self.logger.log(loglevel, 'Fixing parameters for %-22s: %s',
                        name, par_names)

    for (idx, par_name) in zip(par_indices, par_names):
        self.like[idx].setFree(free)
    self._sync_params_state(name)
def antonym(phrase, format="json"):
    """
    Queries the bighugelabs API for the antonym. The results include

     - "syn" (synonym)
     - "ant" (antonym)
     - "rel" (related terms)
     - "sim" (similar terms)
     - "usr" (user suggestions)

    But currently parsing only the antonym, as synonym is already done
    (using the glosbe API).

    :param phrase: word for which antonym is to be found
    :param format: response structure type. Defaults to: "json"

    :returns: returns a json object
    :raises KeyError: returns False when no antonyms are found
    """
    base_url = Vocabulary.__get_api_link("bighugelabs")
    url = base_url.format(word=phrase)
    json_obj = Vocabulary.__return_json(url)

    if not json_obj:
        return False

    result = []
    visited = {}
    idx = 0
    for key in json_obj.keys():
        antonyms = json_obj[key].get('ant', False)
        if not antonyms:
            continue
        for antonym in antonyms:
            if visited.get(antonym, False):
                continue
            result.append({'seq': idx, 'text': antonym})
            idx += 1
            visited[antonym] = True

    if not result:
        return False

    return Response().respond(result, format)
def modify_sub_vect_frags(self):
    "include repeated frags"
    modified_vect_frags = dict()
    init_vect_frags = self.sub_level.S_o_A_frags
    max_id_F = len(init_vect_frags["id"])
    max_id_C = init_vect_frags["id_c"].max() + 1
    cmap = plt.cm.prism
    # extract all colors from the map
    cmaplist = [cmap(i) for i in range(cmap.N)]
    id_smple = np.linspace(0, cmap.N, num=max_id_C)
    RGB_tuples = []
    for i in range(0, max_id_C - 1):
        RGB_tuples.append(cmaplist[int(id_smple[i])])
    self.init_n_sub_frags = len(init_vect_frags["id"])

    # Start from the initial fragment vectors, converted to mutable lists.
    modified_vect_frags["pos"] = list(init_vect_frags["pos"])
    modified_vect_frags["sub_pos"] = list(init_vect_frags["sub_pos"])
    modified_vect_frags["id_c"] = list(init_vect_frags["id_c"])
    modified_vect_frags["start_bp"] = list(init_vect_frags["start_bp"])
    modified_vect_frags["len_bp"] = list(init_vect_frags["len_bp"])
    modified_vect_frags["sub_len"] = list(init_vect_frags["sub_len"])
    modified_vect_frags["circ"] = list(init_vect_frags["circ"])
    modified_vect_frags["id"] = list(init_vect_frags["id"])
    modified_vect_frags["prev"] = list(init_vect_frags["prev"])
    modified_vect_frags["next"] = list(init_vect_frags["next"])
    modified_vect_frags["l_cont"] = list(init_vect_frags["l_cont"])
    modified_vect_frags["sub_l_cont"] = list(init_vect_frags["sub_l_cont"])
    modified_vect_frags["l_cont_bp"] = list(init_vect_frags["l_cont_bp"])
    modified_vect_frags["n_accu"] = list(init_vect_frags["n_accu"])
    modified_vect_frags["rep"] = list(np.zeros(max_id_F, dtype=np.int32))
    modified_vect_frags["activ"] = list(np.ones(max_id_F, dtype=np.int32))
    modified_vect_frags["id_d"] = list(init_vect_frags["id"])

    # WARNING: IMPLICIT BREAKING OF THE CONTIGS
    # Append one singleton copy per requested duplication.
    for data_dup in self.sub_candidates_output_data:
        n_dup = int(data_dup[1])
        id_f = data_dup[0]
        for k in range(0, n_dup):
            modified_vect_frags["pos"].append(0)
            modified_vect_frags["sub_pos"].append(0)
            modified_vect_frags["id_c"].append(max_id_C)
            modified_vect_frags["start_bp"].append(0)
            modified_vect_frags["len_bp"].append(init_vect_frags["len_bp"][id_f])
            modified_vect_frags["sub_len"].append(init_vect_frags["sub_len"][id_f])
            modified_vect_frags["circ"].append(init_vect_frags["circ"][id_f])
            modified_vect_frags["id"].append(max_id_F)
            modified_vect_frags["prev"].append(-1)
            modified_vect_frags["next"].append(-1)
            modified_vect_frags["l_cont"].append(1)
            modified_vect_frags["sub_l_cont"].append(init_vect_frags["sub_len"][id_f])
            modified_vect_frags["l_cont_bp"].append(init_vect_frags["len_bp"][id_f])
            modified_vect_frags["n_accu"].append(init_vect_frags["n_accu"][id_f])
            modified_vect_frags["rep"].append(1)
            modified_vect_frags["activ"].append(1)
            modified_vect_frags["id_d"].append(init_vect_frags["id"][id_f])
            max_id_F += 1
            max_id_C += 1
    logger.info("MAX ID CONTIG = {}".format(max_id_C))

    # Convert every field back to an int32 numpy array.
    for field in ("pos", "sub_pos", "id_c", "start_bp", "len_bp", "sub_len",
                  "circ", "id", "prev", "next", "l_cont", "sub_l_cont",
                  "l_cont_bp", "n_accu", "rep", "activ", "id_d"):
        modified_vect_frags[field] = np.array(modified_vect_frags[field],
                                              dtype=np.int32)

    # Build the dispatcher: for each initial fragment, the [start, end)
    # span of its copies in the repeat collector.
    id_x = 0
    collector_id_repeats = []
    frag_dispatcher = []
    for id_f in range(0, self.init_n_sub_frags):
        if id_f in self.sub_candidates_dup:
            id_start = id_x
            id_dup = np.nonzero(modified_vect_frags["id_d"] == id_f)[0]
            collector_id_repeats.extend(list(id_dup))
            n_rep = len(id_dup)
            frag_dispatcher.append((np.int32(id_start),
                                    np.int32(id_start + n_rep)))
            id_x += n_rep
        else:
            id_start = id_x
            n_rep = 1
            frag_dispatcher.append((np.int32(id_start),
                                    np.int32(id_start + n_rep)))
            collector_id_repeats.append(id_f)
            id_x += 1
    self.sub_collector_id_repeats = np.array(collector_id_repeats,
                                             dtype=np.int32)
    self.sub_frag_dispatcher = np.array(frag_dispatcher, dtype=self.int2)
    self.sub_n_frags = len(modified_vect_frags["id"])
    self.new_sub_S_o_A_frags = modified_vect_frags
def _read(self, pin):
    """Perform an ADC read. Returns the signed integer result of the read."""
    config = _ADS1X15_CONFIG_OS_SINGLE
    config |= (pin & 0x07) << _ADS1X15_CONFIG_MUX_OFFSET
    config |= _ADS1X15_CONFIG_GAIN[self.gain]
    config |= self.mode
    config |= self.rate_config[self.data_rate]
    config |= _ADS1X15_CONFIG_COMP_QUE_DISABLE
    self._write_register(_ADS1X15_POINTER_CONFIG, config)
    while not self._conversion_complete():
        time.sleep(0.01)
    return self.get_last_result()
def contains(self, points):
    """
    Given a set of points, determine whether or not they are inside the
    mesh. This raises an error if called on a non-watertight mesh.

    Parameters
    ----------
    points : (n, 3) float
        Points in cartesian space

    Returns
    -------
    contains : (n, ) bool
        Whether or not each point is inside the mesh
    """
    if not self.is_watertight:
        log.warning('Mesh is non-watertight for contained point query!')
    contains = self.ray.contains_points(points)
    return contains
def iter_forks(self, number=-1, etag=None):
    """Iterator of forks of this gist.

    .. versionchanged:: 0.9
        Added params ``number`` and ``etag``.

    :param int number: (optional), number of forks to iterate over.
        Default: -1 will iterate over all forks of this gist.
    :param str etag: (optional), ETag from a previous request to this
        endpoint.
    :returns: generator of :class:`Gist <Gist>`
    """
    url = self._build_url('forks', base_url=self._api)
    return self._iter(int(number), url, Gist, etag=etag)
def transfer(self, name, cache_key=None):
    """
    Transfers the file with the given name to the remote storage backend
    by queuing the task.

    :param name: file name
    :type name: str
    :param cache_key: the cache key to set after a successful task run
    :type cache_key: str
    :rtype: task result
    """
    if cache_key is None:
        cache_key = self.get_cache_key(name)
    return self.task.delay(name, cache_key,
                           self.local_path, self.remote_path,
                           self.local_options, self.remote_options)
def DiffDataObjects(self, oldObj, newObj):
    """Diff Data Objects"""
    if oldObj == newObj:
        return True
    if not oldObj or not newObj:
        __Log__.debug('DiffDataObjects: One of the objects is None')
        return False
    oldType = Type(oldObj)
    newType = Type(newObj)
    if oldType != newType:
        __Log__.debug(
            'DiffDataObjects: Types do not match for dataobjects. %s != %s'
            % (oldObj._wsdlName, newObj._wsdlName))
        return False
    for prop in oldObj._GetPropertyList():
        oldProp = getattr(oldObj, prop.name)
        newProp = getattr(newObj, prop.name)
        propType = oldObj._GetPropertyInfo(prop.name).type
        if not oldProp and not newProp:
            continue
        elif ((prop.flags & VmomiSupport.F_OPTIONAL) and self._looseMatch
              and (not newProp or not oldProp)):
            continue
        elif not oldProp or not newProp:
            __Log__.debug(
                'DiffDataObjects: One of the objects has property %s unset'
                % prop.name)
            return False
        bMatch = True
        if IsPrimitiveType(oldProp):
            bMatch = oldProp == newProp
        elif isinstance(oldProp, types.ManagedObject):
            bMatch = self.DiffAnyObjects(oldProp, newProp,
                                         prop.flags & VmomiSupport.F_LINK)
        elif isinstance(oldProp, types.DataObject):
            if prop.flags & VmomiSupport.F_LINK:
                bMatch = oldObj.GetKey() == newObj.GetKey()
                LogIf(not bMatch,
                      'DiffDataObjects: Key match failed %s != %s'
                      % (oldObj.GetKey(), newObj.GetKey()))
            else:
                bMatch = self.DiffAnyObjects(oldProp, newProp,
                                             prop.flags & VmomiSupport.F_LINK)
        elif isinstance(oldProp, list):
            bMatch = self.DiffArrayObjects(oldProp, newProp,
                                           prop.flags & VmomiSupport.F_LINK)
        else:
            raise TypeError("Unknown type: " + repr(propType))
        if not bMatch:
            __Log__.debug('DiffDataObjects: Objects differ in property %s'
                          % prop.name)
            return False
    return True
def open(filename=None, file=None, mode='r', suffix=None, options=None):
    """
    Opens the archive at the specified *filename* or from the file-like
    object *file* using the appropriate opener. A specific opener can be
    specified by passing the *suffix* argument.

    # Parameters
    filename (str): A filename to open the archive from.
    file (file-like): A file-like object as source/destination.
    mode (str): The mode to open the archive in.
    suffix (str): Possible override for the *filename* suffix. Must be
        specified when *file* is passed instead of *filename*.
    options (dict): A dictionary that will be passed to the opener with
        which additional options can be specified.
    return (archive-like): An object that represents the archive and
        follows the interface of the #tarfile.TarFile class.
    """
    if mode not in ('r', 'w', 'a'):
        raise ValueError("invalid mode: {0!r}".format(mode))

    if suffix is None:
        suffix, opener = get_opener(filename)
        if file is not None:
            filename = None  # We don't need it anymore.
    else:
        if file is not None and filename is not None:
            raise ValueError("filename must not be set with file & suffix specified")
        try:
            opener = openers[suffix]
        except KeyError:
            raise UnknownArchive(suffix)

    if options is None:
        options = {}

    if file is not None:
        if mode in 'wa' and not hasattr(file, 'write'):
            raise TypeError("file.write() does not exist", file)
        if mode == 'r' and not hasattr(file, 'read'):
            raise TypeError("file.read() does not exist", file)

    if [filename, file].count(None) != 1:
        raise ValueError("either filename or file must be specified")
    if filename is not None:
        file = builtins.open(filename, mode + 'b')

    try:
        return opener(file, mode, options)
    except:
        if filename is not None:
            file.close()
        raise
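A hedged usage sketch of the opener above; the archive name is hypothetical, and the returned object is assumed to follow the tarfile.TarFile interface described in the docstring:

archive = open('bundle.tar.gz', mode='r')
try:
    for member in archive.getmembers():  # tarfile-style member listing
        print(member.name)
finally:
    archive.close()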
def valid_zcta_or_raise(zcta):
    """ Check if ZCTA is valid and raise eeweather.UnrecognizedZCTAError if not.
    """
    conn = metadata_db_connection_proxy.get_connection()
    cur = conn.cursor()
    cur.execute(
        """
        select exists (
            select zcta_id
            from zcta_metadata
            where zcta_id = ?
        )
        """,
        (zcta,),
    )
    (exists,) = cur.fetchone()
    if exists:
        return True
    else:
        raise UnrecognizedZCTAError(zcta)
def get_sys_path(cls, python_path):
    """Get the :data:`sys.path` data for a given python executable.

    :param str python_path: Path to a specific python executable.
    :return: The system path information for that python runtime.
    :rtype: list
    """
    command = [python_path, "-c", "import json, sys; print(json.dumps(sys.path))"]
    c = vistir.misc.run(command, return_object=True, block=True, nospin=True)
    assert c.returncode == 0, "failed loading virtualenv path"
    sys_path = json.loads(c.out.strip())
    return sys_path
def update(self, byte_arr):
    """Read bytes and update the CRC computed."""
    if byte_arr:
        self.value = self.calculate(byte_arr, self.value)
Read bytes and update the computed CRC value.
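A self-contained sketch of the same incremental-update pattern, using zlib.crc32 as the calculate() step (the real class presumably supplies its own polynomial and initial value):

import zlib

class Crc32:
    def __init__(self):
        self.value = 0

    def calculate(self, byte_arr, value):
        # zlib.crc32 accepts a running value, which makes updates chainable.
        return zlib.crc32(byte_arr, value)

    def update(self, byte_arr):
        if byte_arr:
            self.value = self.calculate(byte_arr, self.value)

crc = Crc32()
crc.update(b'hello ')
crc.update(b'world')
assert crc.value == zlib.crc32(b'hello world')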
def configure_discord_logger(
        self,
        discord_webhook=None,
        discord_recipient=None,
        log_level='ERROR',
        log_format=ReportingFormats.PRETTY_PRINT.value,
        custom_args=''
):
    """Logger for sending messages to Discord. Easy way to alert humans of issues

    Note:
        Will try to overwrite minimum log level to enable requested log_level
        Will warn and not attach the Discord logger if the webhook key is missing
        Learn more about webhooks: https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks

    Args:
        discord_webhook (str): discord room webhook (full URL)
        discord_recipient (`str`:<@int>, optional): user/group to notify
        log_level (str): desired log level for handle
            https://docs.python.org/3/library/logging.html#logging-levels
        log_format (str): format for logging messages
            https://docs.python.org/3/library/logging.html#logrecord-attributes
        custom_args (str): special ID to include in messages

    """
    # Override defaults if required #
    discord_webhook = self.config.get_option(
        'LOGGING', 'discord_webhook',
        None, discord_webhook
    )
    discord_recipient = self.config.get_option(
        'LOGGING', 'discord_recipient',
        None, discord_recipient
    )
    log_level = self.config.get_option(
        'LOGGING', 'discord_level',
        None, log_level
    )

    # Actually build discord logging handler #
    discord_obj = DiscordWebhook()
    discord_obj.webhook(discord_webhook)
    # vv TODO vv: Test review #
    if discord_obj.can_query:
        discord_handler = HackyDiscordHandler(
            discord_obj,
            discord_recipient
        )
        self._configure_common(
            'discord_',
            log_level,
            log_format,
            'Discord',
            discord_handler,
            custom_args=custom_args
        )
    else:
        warnings.warn(
            'Unable to execute webhook',
            exceptions.WebhookCreateFailed
        )
Logger for sending messages to Discord. Easy way to alert humans of issues

Note:
    Will try to overwrite minimum log level to enable requested log_level
    Will warn and not attach the Discord logger if the webhook key is missing
    Learn more about webhooks: https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks

Args:
    discord_webhook (str): discord room webhook (full URL)
    discord_recipient (`str`:<@int>, optional): user/group to notify
    log_level (str): desired log level for handle
        https://docs.python.org/3/library/logging.html#logging-levels
    log_format (str): format for logging messages
        https://docs.python.org/3/library/logging.html#logrecord-attributes
    custom_args (str): special ID to include in messages
def project_graph(G, to_crs=None): """ Project a graph from lat-long to the UTM zone appropriate for its geographic location. Parameters ---------- G : networkx multidigraph the networkx graph to be projected to_crs : dict if not None, just project to this CRS instead of to UTM Returns ------- networkx multidigraph """ G_proj = G.copy() start_time = time.time() # create a GeoDataFrame of the nodes, name it, convert osmid to str nodes, data = zip(*G_proj.nodes(data=True)) gdf_nodes = gpd.GeoDataFrame(list(data), index=nodes) gdf_nodes.crs = G_proj.graph['crs'] gdf_nodes.gdf_name = '{}_nodes'.format(G_proj.name) # create new lat/lon columns just to save that data for later, and create a # geometry column from x/y gdf_nodes['lon'] = gdf_nodes['x'] gdf_nodes['lat'] = gdf_nodes['y'] gdf_nodes['geometry'] = gdf_nodes.apply(lambda row: Point(row['x'], row['y']), axis=1) log('Created a GeoDataFrame from graph in {:,.2f} seconds'.format(time.time()-start_time)) # project the nodes GeoDataFrame to UTM gdf_nodes_utm = project_gdf(gdf_nodes, to_crs=to_crs) # extract data for all edges that have geometry attribute edges_with_geom = [] for u, v, key, data in G_proj.edges(keys=True, data=True): if 'geometry' in data: edges_with_geom.append({'u':u, 'v':v, 'key':key, 'geometry':data['geometry']}) # create an edges GeoDataFrame and project to UTM, if there were any edges # with a geometry attribute. geom attr only exists if graph has been # simplified, otherwise you don't have to project anything for the edges # because the nodes still contain all spatial data if len(edges_with_geom) > 0: gdf_edges = gpd.GeoDataFrame(edges_with_geom) gdf_edges.crs = G_proj.graph['crs'] gdf_edges.gdf_name = '{}_edges'.format(G_proj.name) gdf_edges_utm = project_gdf(gdf_edges, to_crs=to_crs) # extract projected x and y values from the nodes' geometry column start_time = time.time() gdf_nodes_utm['x'] = gdf_nodes_utm['geometry'].map(lambda point: point.x) gdf_nodes_utm['y'] = gdf_nodes_utm['geometry'].map(lambda point: point.y) gdf_nodes_utm = gdf_nodes_utm.drop('geometry', axis=1) log('Extracted projected node geometries from GeoDataFrame in {:,.2f} seconds'.format(time.time()-start_time)) # clear the graph to make it a blank slate for the projected data start_time = time.time() edges = list(G_proj.edges(keys=True, data=True)) graph_name = G_proj.graph['name'] G_proj.clear() # add the projected nodes and all their attributes to the graph G_proj.add_nodes_from(gdf_nodes_utm.index) attributes = gdf_nodes_utm.to_dict() for label in gdf_nodes_utm.columns: nx.set_node_attributes(G_proj, name=label, values=attributes[label]) # add the edges and all their attributes (including reconstructed geometry, # when it exists) to the graph for u, v, key, attributes in edges: if 'geometry' in attributes: row = gdf_edges_utm[(gdf_edges_utm['u']==u) & (gdf_edges_utm['v']==v) & (gdf_edges_utm['key']==key)] attributes['geometry'] = row['geometry'].iloc[0] # attributes dict contains key, so we don't need to explicitly pass it here G_proj.add_edge(u, v, **attributes) # set the graph's CRS attribute to the new, projected CRS and return the # projected graph G_proj.graph['crs'] = gdf_nodes_utm.crs G_proj.graph['name'] = '{}_UTM'.format(graph_name) if 'streets_per_node' in G.graph: G_proj.graph['streets_per_node'] = G.graph['streets_per_node'] log('Rebuilt projected graph in {:,.2f} seconds'.format(time.time()-start_time)) return G_proj
Project a graph from lat-long to the UTM zone appropriate for its geographic location. Parameters ---------- G : networkx multidigraph the networkx graph to be projected to_crs : dict if not None, just project to this CRS instead of to UTM Returns ------- networkx multidigraph
def bedInterval(self, who): "return a BED6 entry, thus DOES coordinate conversion for minus strands" if who == 't': st, en = self.tStart, self.tEnd if self.tStrand == '-': st, en = self.tSize-en, self.tSize-st return (self.tName, st, en, self.id, self.score, self.tStrand) else: st, en = self.qStart, self.qEnd if self.qStrand == '-': st, en = self.qSize-en, self.qSize-st assert en-st == self.qEnd - self.qStart return (self.qName, st, en, self.id, self.score, self.qStrand)
return a BED6 entry, thus DOES coordinate conversion for minus strands
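The minus-strand arithmetic in plain numbers: a feature at [10, 20) on the reverse strand of a 100 bp sequence maps to [80, 90) in forward coordinates, exactly the flip the method performs:

size, st, en = 100, 10, 20
st, en = size - en, size - st
assert (st, en) == (80, 90)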
def cut(self): """ Cuts the selected text or the whole line if no text was selected. """ tc = self.textCursor() helper = TextHelper(self) tc.beginEditBlock() no_selection = False sText = tc.selection().toPlainText() if not helper.current_line_text() and sText.count("\n") > 1: tc.deleteChar() else: if not self.textCursor().hasSelection(): no_selection = True TextHelper(self).select_whole_line() super(CodeEdit, self).cut() if no_selection: tc.deleteChar() tc.endEditBlock() self.setTextCursor(tc)
Cuts the selected text or the whole line if no text was selected.
def activate_conf_set(self, set_name):
    '''Activate a configuration set by name.

    @raises NoSuchConfSetError

    '''
    with self._mutex:
        if set_name not in self.conf_sets:
            raise exceptions.NoSuchConfSetError(set_name)
        self._conf.activate_configuration_set(set_name)
Activate a configuration set by name. @raises NoSuchConfSetError
def _get_pidfile_path(self):
    """Return the normalized path for the pidfile, raising an
    exception if it cannot be written to.

    :return: str
    :raises: ValueError
    :raises: OSError

    """
    if self.config.daemon.pidfile:
        pidfile = path.abspath(self.config.daemon.pidfile)
        if not os.access(path.dirname(pidfile), os.W_OK):
            raise ValueError('Cannot write to specified pid file path'
                             ' %s' % pidfile)
        return pidfile
    app = sys.argv[0].split('/')[-1]
    for pidfile in ['%s/pids/%s.pid' % (os.getcwd(), app),
                    '/var/run/%s.pid' % app,
                    '/var/run/%s/%s.pid' % (app, app),
                    '/var/tmp/%s.pid' % app,
                    '/tmp/%s.pid' % app,
                    '%s.pid' % app]:
        if os.access(path.dirname(pidfile), os.W_OK):
            return pidfile
    raise OSError('Could not find an appropriate place for a pid file')
Return the normalized path for the pidfile, raising an
exception if it cannot be written to.

:return: str
:raises: ValueError
:raises: OSError
def serialize_encryption_context(encryption_context):
    """Serializes the contents of a dictionary into a byte string.

    :param dict encryption_context: Dictionary of encryption context keys/values.
    :returns: Serialized encryption context
    :rtype: bytes
    """
    if not encryption_context:
        return bytes()

    serialized_context = bytearray()
    dict_size = len(encryption_context)

    if dict_size > aws_encryption_sdk.internal.defaults.MAX_BYTE_ARRAY_SIZE:
        raise SerializationError("The encryption context contains too many elements.")

    serialized_context.extend(struct.pack(">H", dict_size))

    # Encode strings first to catch bad values.
    encryption_context_list = []
    for key, value in encryption_context.items():
        try:
            if isinstance(key, bytes):
                key = codecs.decode(key)
            if isinstance(value, bytes):
                value = codecs.decode(value)
            encryption_context_list.append(
                (aws_encryption_sdk.internal.str_ops.to_bytes(key), aws_encryption_sdk.internal.str_ops.to_bytes(value))
            )
        except Exception:
            raise SerializationError(
                "Cannot encode dictionary key or value using {}.".format(aws_encryption_sdk.internal.defaults.ENCODING)
            )

    for key, value in sorted(encryption_context_list, key=lambda x: x[0]):
        serialized_context.extend(
            struct.pack(
                ">H{key_size}sH{value_size}s".format(key_size=len(key), value_size=len(value)),
                len(key),
                key,
                len(value),
                value,
            )
        )

    if len(serialized_context) > aws_encryption_sdk.internal.defaults.MAX_BYTE_ARRAY_SIZE:
        raise SerializationError("The serialized context is too large.")

    return bytes(serialized_context)
Serializes the contents of a dictionary into a byte string.

:param dict encryption_context: Dictionary of encryption context keys/values.
:returns: Serialized encryption context
:rtype: bytes
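A sketch of the inverse operation, assuming exactly the wire format produced above (a big-endian uint16 pair count followed by length-prefixed UTF-8 keys and values); the function name is illustrative:

import codecs
import struct

def deserialize_encryption_context(serialized):
    (pair_count,) = struct.unpack_from('>H', serialized, 0)
    offset = 2
    context = {}
    for _ in range(pair_count):
        (key_len,) = struct.unpack_from('>H', serialized, offset)
        offset += 2
        key = serialized[offset:offset + key_len]
        offset += key_len
        (value_len,) = struct.unpack_from('>H', serialized, offset)
        offset += 2
        value = serialized[offset:offset + value_len]
        offset += value_len
        context[codecs.decode(key)] = codecs.decode(value)
    return context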
def meid(number, separator=u' '): ''' Printable Mobile Equipment Identifier (MEID) number. >>> print(meid(123456789012345678)) 1B 69B4BA 630F34 6 >>> print(meid('1B69B4BA630F34')) 1B 69B4BA 630F34 6 ''' if isinstance(number, six.string_types): number = re.sub(r'[\s-]', '', number) try: number = '%014X' % int(number, 16) except ValueError: if len(number) < 18 and number.isdigit(): return meid('%014X' % int(number), separator) else: raise ValueError(_('Invalid MEID, size mismatch')) else: if len(number) not in (14, 15): raise ValueError(_('Invalid MEID, size mismatch')) elif isinstance(number, six.integer_types): if number > 0xfffffffffffffff: raise ValueError(_('Invalid MEID, size mismatch')) return meid(('%014X' % number)[:14], separator) else: raise TypeError(_('Invalid MEID, input type invalid')) number = number.upper() region = number[:2] manufacturer = number[2:8] serial_number = number[8:14] check_digit = number[14:] if check_digit == '': check_digit = luhn_calc(number, chars='0123456789ABCDEF') groups = (region, manufacturer, serial_number, check_digit) return separator.join(list(filter(None, groups)))
Printable Mobile Equipment Identifier (MEID) number. >>> print(meid(123456789012345678)) 1B 69B4BA 630F34 6 >>> print(meid('1B69B4BA630F34')) 1B 69B4BA 630F34 6
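A hypothetical stand-in for the luhn_calc helper the function relies on, implementing the Luhn mod-N check character over an arbitrary alphabet; with chars='0123456789ABCDEF' it reproduces the trailing '6' in the doctests above:

def luhn_calc(number, chars='0123456789'):
    n = len(chars)
    total = 0
    factor = 2  # the position next to the future check digit is doubled
    for ch in reversed(number):
        addend = chars.index(ch) * factor
        total += addend // n + addend % n   # digit-sum of the doubled value
        factor = 1 if factor == 2 else 2
    return chars[(n - total % n) % n]

assert luhn_calc('1B69B4BA630F34', chars='0123456789ABCDEF') == '6'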
def capture(self, commit=""):
    """Capture the current state of a project based on its provider

    Commit is relevant only for upstream providers.
    If empty, the latest commit from the provider repository is taken.
    It is ignored for distribution providers.
    The provider itself is taken from the instance (``self._provider``).

    :param commit: project's original commit
    :type commit: string
    """
    self._validateProvider(self._provider)

    # get client for repository
    # TODO(jchaloup): read config file to switch between local and remote clients
    # TODO(jchaloup): remote client can cover gofed infrastructure or any remote source for repository info
    client = RepositoryClientBuilder().buildWithRemoteClient(self._provider)

    if self._provider["provider"] == "github":
        self._signature = ProjectGithubRepositoryCapturer(self._provider, client).capture(commit).signature()
    elif self._provider["provider"] == "bitbucket":
        self._signature = ProjectBitbucketRepositoryCapturer(self._provider, client).capture(commit).signature()
    else:
        raise KeyError("Provider '%s' not recognized" % self._provider["provider"])

    return self
Capture the current state of a project based on its provider

Commit is relevant only for upstream providers.
If empty, the latest commit from the provider repository is taken.
It is ignored for distribution providers.
The provider itself is taken from the instance (``self._provider``).

:param commit: project's original commit
:type commit: string
def getWindowRect(self, hwnd): """ Returns a rect (x,y,w,h) for the specified window's area """ rect = ctypes.wintypes.RECT() if ctypes.windll.user32.GetWindowRect(hwnd, ctypes.byref(rect)): x1 = rect.left y1 = rect.top x2 = rect.right y2 = rect.bottom return (x1, y1, x2-x1, y2-y1) return None
Returns a rect (x,y,w,h) for the specified window's area
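A Windows-only usage sketch probing the current foreground window (GetForegroundWindow is a standard user32 call; outside Windows ctypes.windll does not exist):

import ctypes
import ctypes.wintypes

user32 = ctypes.windll.user32
hwnd = user32.GetForegroundWindow()
rect = ctypes.wintypes.RECT()
if user32.GetWindowRect(hwnd, ctypes.byref(rect)):
    print(rect.left, rect.top,
          rect.right - rect.left, rect.bottom - rect.top)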
def _from_dict(cls, _dict):
    """Initialize an Expansions object from a json dictionary."""
    args = {}
    if 'expansions' in _dict:
        args['expansions'] = [
            Expansion._from_dict(x) for x in (_dict.get('expansions'))
        ]
    else:
        raise ValueError(
            'Required property \'expansions\' not present in Expansions JSON'
        )
    return cls(**args)
Initialize an Expansions object from a json dictionary.
def schema(ctx, schema):
    """ Load schema definitions from a YAML file. """
    data = yaml.safe_load(schema)
    if not isinstance(data, (list, tuple)):
        data = [data]
    with click.progressbar(data, label=schema.name) as bar:
        for item in bar:
            ctx.obj['grano'].schemata.upsert(item)
Load schema definitions from a YAML file.
def unserialize(wd: WordDictionary, text: Dict):
    """
    Transforms a value serialized by `serialize()` back into its original form.
    """
    if not isinstance(text, Mapping):
        raise ValueError('Text does not have the right format')

    try:
        t = text['type']

        if t == 'string':
            return text['value']
        elif t == 'trans':
            if not isinstance(text['params'], Mapping):
                raise ValueError('Params should be a dictionary')

            for param in text['params']:
                if not isinstance(param, str):
                    raise ValueError('Params are not all text-keys')

            return StringToTranslate(
                wd=wd,
                key=text['key'],
                count=text['count'],
                params=text['params'],
            )
        else:
            raise ValueError('Unknown type "{}"'.format(t))
    except KeyError:
        raise ValueError('Not enough information to unserialize')
Transforms a value serialized by `serialize()` back into its original form.
def scripts(cls, pkg, metadata, paths=[], **kwargs): """This class method is the preferred way to create SceneScript objects. :param str pkg: The dotted name of the package containing the scripts. :param metadata: A mapping or data object. This parameter permits searching among scripts against particular criteria. Its use is application specific. :param list(str) paths: A sequence of file paths to the scripts relative to the package. You can satisfy all parameter requirements by passing in a :py:class:`~turberfield.dialogue.model.SceneScript.Folder` object like this:: SceneScript.scripts(**folder._asdict()) The method generates a sequence of :py:class:`~turberfield.dialogue.model.SceneScript` objects. """ for path in paths: try: fP = pkg_resources.resource_filename(pkg, path) except ImportError: cls.log.warning( "No package called {}".format(pkg) ) else: if not os.path.isfile(fP): cls.log.warning( "No script file at {}".format(os.path.join(*pkg.split(".") + [path])) ) else: yield cls(fP, metadata)
This class method is the preferred way to create SceneScript objects. :param str pkg: The dotted name of the package containing the scripts. :param metadata: A mapping or data object. This parameter permits searching among scripts against particular criteria. Its use is application specific. :param list(str) paths: A sequence of file paths to the scripts relative to the package. You can satisfy all parameter requirements by passing in a :py:class:`~turberfield.dialogue.model.SceneScript.Folder` object like this:: SceneScript.scripts(**folder._asdict()) The method generates a sequence of :py:class:`~turberfield.dialogue.model.SceneScript` objects.
def size(self):
    """Size is the number of nodes in the subtrie rooted at this node,
    including the current node."""
    if self.children:
        return sum(c.size() for c in self.children.values()) + 1
    else:
        return 1
Size is the number of nodes in the subtrie rooted at this node, including the current node.
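A minimal node class compatible with size() above, shown on a three-node trie (the root plus two children); the class layout is illustrative:

class Node:
    def __init__(self):
        self.children = {}

    def size(self):
        if self.children:
            return sum(c.size() for c in self.children.values()) + 1
        return 1

root = Node()
root.children['a'] = Node()
root.children['b'] = Node()
assert root.size() == 3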
def volume_detach(self, name, timeout=300):
    '''
    Detach a block device
    '''
    try:
        volume = self.volume_show(name)
    except KeyError as exc:
        raise SaltCloudSystemExit('Unable to find {0} volume: {1}'.format(name, exc))
    if not volume['attachments']:
        return True
    response = self.compute_conn.volumes.delete_server_volume(
        volume['attachments'][0]['server_id'],
        volume['attachments'][0]['id']
    )
    trycount = 0
    start = time.time()
    while True:
        trycount += 1
        try:
            response = self._volume_get(volume['id'])
            if response['status'] == 'available':
                return response
        except Exception as exc:
            log.debug('Volume is detaching: %s', name)
            time.sleep(1)
            if time.time() - start > timeout:
                log.error('Timed out after %d seconds '
                          'while waiting for data', timeout)
                return False

        log.debug(
            'Retrying _volume_get() (try %d)', trycount
        )
Detach a block device
def cube(target, pore_diameter='pore.diameter', throat_area='throat.area'):
    r"""
    Calculates internal surface area of pore bodies assuming they are cubes
    then subtracts the area of the neighboring throats.

    Parameters
    ----------
    target : OpenPNM Object
        The object for which these values are being calculated.  This
        controls the length of the calculated array, and also provides
        access to other necessary thermofluid properties.

    pore_diameter : string
        The dictionary key to the pore diameter array.

    throat_area : string
        The dictionary key to the throat area array.  Throat areas are
        needed since their intersection with the pore is removed from the
        computation.

    """
    network = target.project.network
    D = target[pore_diameter]
    Tn = network.find_neighbor_throats(pores=target.Ps, flatten=False)
    Tsurf = _np.array([_np.sum(network[throat_area][Ts]) for Ts in Tn])
    value = 6*D**2 - Tsurf
    return value
r"""
Calculates internal surface area of pore bodies assuming they are cubes
then subtracts the area of the neighboring throats.

Parameters
----------
target : OpenPNM Object
    The object for which these values are being calculated.  This
    controls the length of the calculated array, and also provides
    access to other necessary thermofluid properties.

pore_diameter : string
    The dictionary key to the pore diameter array.

throat_area : string
    The dictionary key to the throat area array.  Throat areas are
    needed since their intersection with the pore is removed from the
    computation.
def _to_M8(key, tz=None): """ Timestamp-like => dt64 """ if not isinstance(key, Timestamp): # this also converts strings key = Timestamp(key) if key.tzinfo is not None and tz is not None: # Don't tz_localize(None) if key is already tz-aware key = key.tz_convert(tz) else: key = key.tz_localize(tz) return np.int64(conversion.pydt_to_i8(key)).view(_NS_DTYPE)
Timestamp-like => dt64
def post_run_cell(self): """Runs after the user-entered code in a cell has been executed. It detects any new, decoratable objects that haven't been decorated yet and then decorates them. """ #We just want to detect any new, decoratable objects that haven't been #decorated yet. decorlist = {k: [] for k in self.atypes} for atype in self.atypes: for n, o in self._get_decoratables(atype): self._decorate(atype, n, o) #Next, check whether we have an outstanding "loop intercept" that we #"wrapped" with respect to acorn by enabling streamlining. if self.pre is not None: #Re-enable the acorn logging systems so that it gets back to normal. from acorn.logging.decoration import set_streamlining set_streamlining(False) from acorn import msg from acorn.logging.database import record from time import time #Determine the elapsed time for the execution of the entire cell. entry = self.pre entry["e"] = time() - entry["s"] #See if we can match the executed cell's code up with one that we #intercepted in the past.. cellid = self._find_cellid(entry["c"]) if cellid is None: cellid = self.cellid #Store the contents of the cell *before* they get overwritten by a #diff. self.cellids[cellid] = entry["c"] record("__main__.{0:d}".format(cellid), entry, diff=True) msg.info(entry, 1) self.pre = None #Finally, check whether any new variables have shown up, or have had #their values changed. from acorn.logging.database import tracker, active_db, Instance varchange = self._var_changes() taskdb = active_db() for n, o in varchange: otrack = tracker(o) if isinstance(otrack, Instance): taskdb.log_uuid(otrack.uuid) global thumb_uuid if thumb_uuid is not None: self._log_images() #Reset the image tracker list so that we don't save these images #again next cell execution. thumb_uuid = None self.cellid = None
Runs after the user-entered code in a cell has been executed. It detects any new, decoratable objects that haven't been decorated yet and then decorates them.
def require_server(fn): """ Checks if the user has called the task with a server name. Fabric tasks decorated with this decorator must be called like so:: fab <server name> <task name> If no server name is given, the task will not be executed. """ @wraps(fn) def wrapper(*args, **kwargs): if env.machine is None: abort(red('ERROR: You must provide a server name to call this' ' task!')) return fn(*args, **kwargs) return wrapper
Checks if the user has called the task with a server name. Fabric tasks decorated with this decorator must be called like so:: fab <server name> <task name> If no server name is given, the task will not be executed.
def next(self): """ Where to redirect after authorization """ next = request.args.get('next') if next is None: params = self.default_redirect_params next = url_for(self.default_redirect_endpoint, **params) return next
Where to redirect after authorization
def validate_bool(b): """Convert b to a boolean or raise""" if isinstance(b, six.string_types): b = b.lower() if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True): return True elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False): return False else: raise ValueError('Could not convert "%s" to boolean' % b)
Convert b to a boolean or raise
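Behaviour in a nutshell (string matching is case-insensitive, and plain ints and bools are accepted too):

assert validate_bool('Yes') is True
assert validate_bool('off') is False
assert validate_bool(1) is True
try:
    validate_bool('maybe')
except ValueError as exc:
    print(exc)  # Could not convert "maybe" to boolean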
def to_dict(_dict, onts, mdb_safe=False): """ Convert a pysaml2 SAML2 message class instance into a basic dictionary format. The export interface. :param _dict: The pysaml2 metadata instance :param onts: List of schemas to use for the conversion :return: The converted information """ res = {} if isinstance(_dict, SamlBase): res["__class__"] = "%s&%s" % (_dict.c_namespace, _dict.c_tag) for key in _dict.keyswv(): if key in IMP_SKIP: continue val = getattr(_dict, key) if key == "extension_elements": _eel = extension_elements_to_elements(val, onts) _val = [_eval(_v, onts, mdb_safe) for _v in _eel] elif key == "extension_attributes": if mdb_safe: _val = dict([(k.replace(".", "__"), v) for k, v in val.items()]) #_val = {k.replace(".", "__"): v for k, v in val.items()} else: _val = val else: _val = _eval(val, onts, mdb_safe) if _val: if mdb_safe: key = key.replace(".", "__") res[key] = _val else: for key, val in _dict.items(): _val = _eval(val, onts, mdb_safe) if _val: if mdb_safe and "." in key: key = key.replace(".", "__") res[key] = _val return res
Convert a pysaml2 SAML2 message class instance into a basic dictionary format. The export interface. :param _dict: The pysaml2 metadata instance :param onts: List of schemas to use for the conversion :return: The converted information
def pyoidcMiddleware(func):
    """Common wrapper for the underlying pyoidc library functions.
    Reads GET params and POST data before passing it on to the library and
    converts the response from oic.utils.http_util to wsgi.
    :param func: underlying library function
    """
    def wrapper(environ, start_response):
        data = get_or_post(environ)
        cookies = environ.get("HTTP_COOKIE", "")
        resp = func(request=data, cookie=cookies)
        return resp(environ, start_response)

    return wrapper
Common wrapper for the underlying pyoidc library functions.
Reads GET params and POST data before passing it on to the library and
converts the response from oic.utils.http_util to wsgi.
:param func: underlying library function
def all_states(n, big_endian=False): """Return all binary states for a system. Args: n (int): The number of elements in the system. big_endian (bool): Whether to return the states in big-endian order instead of little-endian order. Yields: tuple[int]: The next state of an ``n``-element system, in little-endian order unless ``big_endian`` is ``True``. """ if n == 0: return for state in product((0, 1), repeat=n): if big_endian: yield state else: yield state[::-1]
Return all binary states for a system. Args: n (int): The number of elements in the system. big_endian (bool): Whether to return the states in big-endian order instead of little-endian order. Yields: tuple[int]: The next state of an ``n``-element system, in little-endian order unless ``big_endian`` is ``True``.
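Concrete output for a two-element system; note that the endianness flag changes the ordering, not the set of states:

assert list(all_states(2)) == [(0, 0), (1, 0), (0, 1), (1, 1)]
assert list(all_states(2, big_endian=True)) == [(0, 0), (0, 1), (1, 0), (1, 1)]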
def p_ifdef_else(p): """ ifdef : ifdefelsea ifdefelseb ENDIF """ global ENABLED p[0] = p[1] + p[2] p[0] += ['#line %i "%s"' % (p.lineno(3) + 1, CURRENT_FILE[-1])] ENABLED = IFDEFS[-1][0] IFDEFS.pop()
ifdef : ifdefelsea ifdefelseb ENDIF
def compute_heterozygosity(in_prefix, nb_samples): """Computes the heterozygosity ratio of samples (from tped).""" tped_name = in_prefix + ".tped" tfam_name = in_prefix + ".tfam" # The function we want to use check_heterozygosity = np.vectorize(is_heterozygous) # The autosomes autosomes = {str(i) for i in xrange(1, 23)} # The tfam samples = None with open(tfam_name, 'rb') as input_file: samples = input_file.readlines() samples = [tuple(i.rstrip("\r\n").split("\t")[:2]) for i in samples] heterozygosity = np.zeros(nb_samples, dtype=int) nb_markers = np.zeros(nb_samples, dtype=int) with open(tped_name, 'rb') as input_file: # There is no header for line in input_file: row = np.array(line.rstrip("\r\n").split("\t")) chromosome = row[0] if chromosome not in autosomes: # This is not an autosome, so we skip continue # Getting the genotypes genotypes = row[4:] # Finding the heterozygous genotypes heterozygosity += check_heterozygosity(genotypes) # Adding to number of markers for each samples (excluding no calls) nb_markers += genotypes != "0 0" return np.true_divide(heterozygosity, nb_markers), samples
Computes the heterozygosity ratio of samples (from tped).
def file_names(self) -> Iterable[str]: """Generates all file names that are used to generate file_handles. """ if self.is_sharded(): yield from ShardedFile(self.filename_spec).get_filenames() elif self.filename_spec: yield self.filename_spec
Generates all file names that are used to generate file_handles.
def simulate(args):
    """
    %prog simulate run_dir 1 300

    Simulate BAMs with varying numbers of repeat units using a read simulator
    (wgsim or eagle). The above command will simulate between 1 and 300 CAGs
    in the HD region, in a directory called `run_dir`.
    """
    p = OptionParser(simulate.__doc__)
    p.add_option("--method", choices=("wgsim", "eagle"), default="eagle",
                 help="Read simulator")
    p.add_option("--ref", default="hg38", choices=("hg38", "hg19"),
                 help="Reference genome version")
    p.add_option("--tred", default="HD", help="TRED locus")
    add_simulate_options(p)
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    rundir, startunits, endunits = args
    ref = opts.ref
    ref_fasta = "/mnt/ref/{}.upper.fa".format(ref)
    startunits, endunits = int(startunits), int(endunits)
    basecwd = os.getcwd()
    mkdir(rundir)
    os.chdir(rundir)
    cwd = os.getcwd()

    # TRED region (e.g. Huntington)
    pad_left, pad_right = 1000, 10000
    repo = TREDsRepo(ref=ref)
    tred = repo[opts.tred]
    chr, start, end = tred.chr, tred.repeat_start, tred.repeat_end

    logging.debug("Simulating {}".format(tred))
    fasta = Fasta(ref_fasta)
    seq_left = fasta[chr][start - pad_left:start - 1]
    seq_right = fasta[chr][end: end + pad_right]
    motif = tred.repeat
    simulate_method = wgsim if opts.method == "wgsim" else eagle

    # Write fake sequence
    for units in range(startunits, endunits + 1):
        pf = str(units)
        mkdir(pf)
        os.chdir(pf)
        seq = str(seq_left) + motif * units + str(seq_right)
        fastafile = pf + ".fasta"
        make_fasta(seq, fastafile, id=chr.upper())

        # Simulate reads on it
        simulate_method([fastafile, "--depth={}".format(opts.depth),
                         "--readlen={}".format(opts.readlen),
                         "--distance={}".format(opts.distance),
                         "--outfile={}".format(pf)])

        read1 = pf + ".bwa.read1.fastq"
        read2 = pf + ".bwa.read2.fastq"
        samfile, _ = align([ref_fasta, read1, read2])
        indexed_samfile = index([samfile])

        sh("mv {} ../{}.bam".format(indexed_samfile, pf))
        sh("mv {}.bai ../{}.bam.bai".format(indexed_samfile, pf))

        os.chdir(cwd)
        shutil.rmtree(pf)

    os.chdir(basecwd)
%prog simulate run_dir 1 300

Simulate BAMs with varying numbers of repeat units using a read simulator
(wgsim or eagle). The above command will simulate between 1 and 300 CAGs
in the HD region, in a directory called `run_dir`.
def _slurm_info(queue): """Returns machine information for a slurm job scheduler. """ cl = "sinfo -h -p {} --format '%c %m %D'".format(queue) num_cpus, mem, num_nodes = subprocess.check_output(shlex.split(cl)).decode().split() # if the queue contains multiple memory configurations, the minimum value is printed with a trailing '+' mem = float(mem.replace('+', '')) num_cpus = int(num_cpus.replace('+', '')) # handle small clusters where we need to allocate memory for bcbio and the controller # This will typically be on cloud AWS machines bcbio_mem = 2000 controller_mem = 4000 if int(num_nodes) < 3 and mem > (bcbio_mem + controller_mem) * 2: mem = mem - bcbio_mem - controller_mem return [{"cores": int(num_cpus), "memory": mem / 1024.0, "name": "slurm_machine"}]
Returns machine information for a slurm job scheduler.
def monitor(self, timeout):
    """
    Monitor the process and stop it when it runs out of time.
    """
    def check():
        time.sleep(timeout)
        self.stop()

    watcher = threading.Thread(target=check)
    watcher.setDaemon(True)
    watcher.start()
Monitor the process and stop it when it runs out of time.
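A self-contained sketch of the same watchdog pattern (class and attribute names are illustrative):

import threading
import time

class Process:
    def __init__(self):
        self.stopped = threading.Event()

    def stop(self):
        self.stopped.set()

    def monitor(self, timeout):
        def check():
            time.sleep(timeout)
            self.stop()
        watcher = threading.Thread(target=check)
        watcher.daemon = True
        watcher.start()

p = Process()
p.monitor(0.1)
p.stopped.wait()  # returns after ~0.1 s when the watchdog fires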
def update(self): """Update swap memory stats using the input method.""" # Init new stats stats = self.get_init_value() if self.input_method == 'local': # Update stats using the standard system lib # Grab SWAP using the psutil swap_memory method sm_stats = psutil.swap_memory() # Get all the swap stats (copy/paste of the psutil documentation) # total: total swap memory in bytes # used: used swap memory in bytes # free: free swap memory in bytes # percent: the percentage usage # sin: the number of bytes the system has swapped in from disk (cumulative) # sout: the number of bytes the system has swapped out from disk # (cumulative) for swap in ['total', 'used', 'free', 'percent', 'sin', 'sout']: if hasattr(sm_stats, swap): stats[swap] = getattr(sm_stats, swap) elif self.input_method == 'snmp': # Update stats using SNMP if self.short_system_name == 'windows': # Mem stats for Windows OS are stored in the FS table try: fs_stat = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name], bulk=True) except KeyError: self.reset() else: for fs in fs_stat: # The virtual memory concept is used by the operating # system to extend (virtually) the physical memory and # thus to run more programs by swapping unused memory # zone (page) to a disk file. if fs == 'Virtual Memory': stats['total'] = int( fs_stat[fs]['size']) * int(fs_stat[fs]['alloc_unit']) stats['used'] = int( fs_stat[fs]['used']) * int(fs_stat[fs]['alloc_unit']) stats['percent'] = float( stats['used'] * 100 / stats['total']) stats['free'] = stats['total'] - stats['used'] break else: stats = self.get_stats_snmp(snmp_oid=snmp_oid['default']) if stats['total'] == '': self.reset() return stats for key in iterkeys(stats): if stats[key] != '': stats[key] = float(stats[key]) * 1024 # used=total-free stats['used'] = stats['total'] - stats['free'] # percent: the percentage usage calculated as (total - # available) / total * 100. stats['percent'] = float( (stats['total'] - stats['free']) / stats['total'] * 100) # Update the stats self.stats = stats return self.stats
Update swap memory stats using the input method.
def cal_pth(self, v, temp): """ calculate thermal pressure :param v: unit-cell volume in A^3 :param temp: temperature in K :return: thermal pressure in GPa """ params_t = self._set_params(self.params_therm) return constq_pth(v, temp, *params_t, self.n, self.z, t_ref=self.t_ref, three_r=self.three_r)
calculate thermal pressure :param v: unit-cell volume in A^3 :param temp: temperature in K :return: thermal pressure in GPa
def remove(self, line): """Delete all lines matching the given line.""" nb = 0 for block in self.blocks: nb += block.remove(line) return nb
Delete all lines matching the given line.
def feature_hash(feature, dim, seed=123):
    """Feature hashing.

    Args:
        feature (str): Target feature represented as string.
        dim (int): Number of dimensions for a hash value.
        seed (float): Seed of a MurmurHash3 hash function.

    Returns:
        numpy 1d array: one-hot-encoded feature vector for `feature`.

    """
    vec = np.zeros(dim)
    i = mmh3.hash(feature, seed) % dim
    vec[i] = 1
    return vec
Feature hashing.

Args:
    feature (str): Target feature represented as string.
    dim (int): Number of dimensions for a hash value.
    seed (float): Seed of a MurmurHash3 hash function.

Returns:
    numpy 1d array: one-hot-encoded feature vector for `feature`.
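A quick usage check (assumes the mmh3 and numpy packages are installed; the feature string is illustrative):

vec = feature_hash('color=red', dim=8)
assert vec.sum() == 1.0                                  # exactly one hot index
assert (vec == feature_hash('color=red', dim=8)).all()   # deterministic for a fixed seed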
def decomposed_diffusion_program(qubits: List[int]) -> Program:
    r"""
    Constructs the diffusion operator used in Grover's Algorithm, acted on both sides
    by a Hadamard gate on each qubit. Note that this means that the matrix representation
    of this operator is diag(1, -1, ..., -1). In particular, this decomposes the diffusion
    operator, which is a :math:`2^{\text{len(qubits)}} \times 2^{\text{len(qubits)}}` sparse
    matrix, into :math:`\mathcal{O}(\text{len(qubits)}^2)` single- and two-qubit gates.

    See C. Lavor, L.R.U. Manssur, and R. Portugal (2003) `Grover's Algorithm: Quantum
    Database Search`_ for more information.

    .. _`Grover's Algorithm: Quantum Database Search`: https://arxiv.org/abs/quant-ph/0301079

    :param qubits: A list of ints corresponding to the qubits to operate on.
        The operator operates on bitstrings of the form
        ``|qubits[0], ..., qubits[-1]>``.
    """
    program = Program()

    if len(qubits) == 1:
        program.inst(Z(qubits[0]))
    else:
        program.inst([X(q) for q in qubits])
        program.inst(H(qubits[-1]))
        program.inst(RZ(-np.pi, qubits[0]))
        program += (ControlledProgramBuilder()
                    .with_controls(qubits[:-1])
                    .with_target(qubits[-1])
                    .with_operation(X_GATE)
                    .with_gate_name(X_GATE_LABEL).build())
        program.inst(RZ(-np.pi, qubits[0]))
        program.inst(H(qubits[-1]))
        program.inst([X(q) for q in qubits])
    return program
Constructs the diffusion operator used in Grover's Algorithm, acted on both sides
by a Hadamard gate on each qubit. Note that this means that the matrix representation
of this operator is diag(1, -1, ..., -1). In particular, this decomposes the diffusion
operator, which is a :math:`2^{\text{len(qubits)}} \times 2^{\text{len(qubits)}}` sparse
matrix, into :math:`\mathcal{O}(\text{len(qubits)}^2)` single- and two-qubit gates.

See C. Lavor, L.R.U. Manssur, and R. Portugal (2003) `Grover's Algorithm: Quantum
Database Search`_ for more information.

.. _`Grover's Algorithm: Quantum Database Search`: https://arxiv.org/abs/quant-ph/0301079

:param qubits: A list of ints corresponding to the qubits to operate on.
    The operator operates on bitstrings of the form
    ``|qubits[0], ..., qubits[-1]>``.
def plotMatches2(listofNValues, errors, listOfScales, scaleErrors, fileName = "images/scalar_matches.pdf"): """ Plot two figures side by side in an aspect ratio appropriate for the paper. """ w, h = figaspect(0.4) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(w,h)) plotMatches(listofNValues, errors, fileName=None, fig=fig, ax=ax1) plotScaledMatches(listOfScales, scaleErrors, fileName=None, fig=fig, ax=ax2) plt.savefig(fileName) plt.close()
Plot two figures side by side in an aspect ratio appropriate for the paper.
def tpm(tpm, check_independence=True): """Validate a TPM. The TPM can be in * 2-dimensional state-by-state form, * 2-dimensional state-by-node form, or * multidimensional state-by-node form. """ see_tpm_docs = ( 'See the documentation on TPM conventions and the `pyphi.Network` ' 'object for more information on TPM forms.' ) # Cast to np.array. tpm = np.array(tpm) # Get the number of nodes from the state-by-node TPM. N = tpm.shape[-1] if tpm.ndim == 2: if not ((tpm.shape[0] == 2**N and tpm.shape[1] == N) or (tpm.shape[0] == tpm.shape[1])): raise ValueError( 'Invalid shape for 2-D TPM: {}\nFor a state-by-node TPM, ' 'there must be ' '2^N rows and N columns, where N is the ' 'number of nodes. State-by-state TPM must be square. ' '{}'.format(tpm.shape, see_tpm_docs)) if tpm.shape[0] == tpm.shape[1] and check_independence: conditionally_independent(tpm) elif tpm.ndim == (N + 1): if tpm.shape != tuple([2] * N + [N]): raise ValueError( 'Invalid shape for multidimensional state-by-node TPM: {}\n' 'The shape should be {} for {} nodes. {}'.format( tpm.shape, ([2] * N) + [N], N, see_tpm_docs)) else: raise ValueError( 'Invalid TPM: Must be either 2-dimensional or multidimensional. ' '{}'.format(see_tpm_docs)) return True
Validate a TPM. The TPM can be in * 2-dimensional state-by-state form, * 2-dimensional state-by-node form, or * multidimensional state-by-node form.
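Two shapes that exercise the validator, assuming numpy is imported as in the function (for 2 nodes, a multidimensional state-by-node TPM must have shape (2, 2, 2)):

import numpy as np

assert tpm(np.zeros((2, 2, 2)))   # valid multidimensional state-by-node form
try:
    tpm(np.zeros((3, 4)))         # neither square nor 2^N x N
except ValueError as exc:
    print(exc)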
def binary_operator(op): """ Factory function for making binary operator methods on a Filter subclass. Returns a function "binary_operator" suitable for implementing functions like __and__ or __or__. """ # When combining a Filter with a NumericalExpression, we use this # attrgetter instance to defer to the commuted interpretation of the # NumericalExpression operator. commuted_method_getter = attrgetter(method_name_for_op(op, commute=True)) def binary_operator(self, other): if isinstance(self, NumericalExpression): self_expr, other_expr, new_inputs = self.build_binary_op( op, other, ) return NumExprFilter.create( "({left}) {op} ({right})".format( left=self_expr, op=op, right=other_expr, ), new_inputs, ) elif isinstance(other, NumericalExpression): # NumericalExpression overrides numerical ops to correctly handle # merging of inputs. Look up and call the appropriate # right-binding operator with ourself as the input. return commuted_method_getter(other)(self) elif isinstance(other, Term): if other.dtype != bool_dtype: raise BadBinaryOperator(op, self, other) if self is other: return NumExprFilter.create( "x_0 {op} x_0".format(op=op), (self,), ) return NumExprFilter.create( "x_0 {op} x_1".format(op=op), (self, other), ) elif isinstance(other, int): # Note that this is true for bool as well return NumExprFilter.create( "x_0 {op} {constant}".format(op=op, constant=int(other)), binds=(self,), ) raise BadBinaryOperator(op, self, other) binary_operator.__doc__ = "Binary Operator: '%s'" % op return binary_operator
Factory function for making binary operator methods on a Filter subclass. Returns a function "binary_operator" suitable for implementing functions like __and__ or __or__.
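A sketch of how such a factory is typically wired onto the class; the zipline-style names are assumed, and '&' and '|' are the logical ops Filters support:

class Filter(Term):
    __and__ = binary_operator('&')
    __or__ = binary_operator('|')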
def get_rot(slab):
    """
    Gets the transformation to rotate the z axis onto the Miller index
    direction
    """
    new_z = get_mi_vec(slab)
    a, b, c = slab.lattice.matrix
    new_x = a / np.linalg.norm(a)
    new_y = np.cross(new_z, new_x)
    x, y, z = np.eye(3)
    rot_matrix = np.array([np.dot(*el) for el in
                           itertools.product([x, y, z],
                                             [new_x, new_y, new_z])]).reshape(3, 3)
    rot_matrix = np.transpose(rot_matrix)
    sop = SymmOp.from_rotation_and_translation(rot_matrix)
    return sop
Gets the transformation to rotate the z axis onto the Miller index direction