code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def destroySingleton(cls):
    """Tear down the singleton instance of this class, if one is registered.

    The singleton lives under the name-mangled attribute
    ``_<ClassName>__singleton``; it is cleared from the class first, then
    closed and scheduled for deletion (``deleteLater`` suggests a Qt
    object — TODO confirm).
    """
    key = '_{0}__singleton'.format(cls.__name__)
    instance = getattr(cls, key, None)
    if instance is None:
        return
    setattr(cls, key, None)
    instance.close()
    instance.deleteLater()
Destroys the singleton instance of this class, if one exists.
def format(self, fmt, locale=None):
    """Format this instance using the attached formatter.

    :param fmt: the format to use
    :type fmt: str
    :param locale: the locale to use
    :type locale: str or None
    :rtype: str
    """
    formatter = self._formatter
    return formatter.format(self, fmt, locale)
Formats the instance using the given format. :param fmt: The format to use :type fmt: str :param locale: The locale to use :type locale: str or None :rtype: str
def join(self, pad=None, gap=None):
    """Concatenate all elements of this list into a single series.

    Entries are sorted (in place) by GPS epoch, then appended one after
    another onto a copy of the first entry.

    Parameters
    ----------
    pad : `float`, optional
        value with which to pad gaps (forwarded to ``append``)
    gap : `str`, optional
        gap-handling policy forwarded to ``append``
        (presumably ``'raise'``/``'ignore'``/``'pad'`` — TODO confirm
        against the series ``append`` documentation)

    Returns
    -------
    series
        a single series containing all data from each entry; an empty
        ``EntryClass`` instance when this list is empty
    """
    if not self:
        return self.EntryClass(numpy.empty((0,) * self.EntryClass._ndim))
    self.sort(key=lambda t: t.epoch.gps)
    out = self[0].copy()
    for series in self[1:]:
        out.append(series, gap=gap, pad=pad)
    return out
Concatenate all of the elements of this list into a single object Parameters ---------- pad : `float`, optional, default: `0.0` value with which to pad gaps gap : `str`, optional, default: `'raise'` what to do if there are gaps in the data, one of - ``'raise'`` - raise a `ValueError` - ``'ignore'`` - remove gap and join data - ``'pad'`` - pad gap with zeros If `pad` is given and is not `None`, the default is ``'pad'``, otherwise ``'raise'``. Returns ------- series : `gwpy.types.TimeSeriesBase` subclass a single series containing all data from each entry in this list See Also -------- TimeSeries.append for details on how the individual series are concatenated together
def establish_scp_conn(self):
    """Establish the secure copy (SCP) connection.

    Reuses the SSH control channel's connection parameters to open a
    second SSH connection dedicated to SCP, then wraps its transport in
    an ``scp.SCPClient``. Side effects: sets ``self.scp_conn`` and
    ``self.scp_client``.
    """
    ssh_connect_params = self.ssh_ctl_chan._connect_params_dict()
    self.scp_conn = self.ssh_ctl_chan._build_ssh_client()
    self.scp_conn.connect(**ssh_connect_params)
    self.scp_client = scp.SCPClient(self.scp_conn.get_transport())
Establish the secure copy connection.
def ConvertMessage(self, value, message):
    """Convert a JSON object into a protocol message.

    Args:
      value: A JSON object.
      message: A WKT or regular protocol message to record the data.

    Raises:
      ParseError: In case of convert problems.
    """
    message_descriptor = message.DESCRIPTOR
    full_name = message_descriptor.full_name
    if _IsWrapperMessage(message_descriptor):
        self._ConvertWrapperMessage(value, message)
    elif full_name in _WKTJSONMETHODS:
        # Dispatch to the registered well-known-type parse method by name.
        methodcaller(_WKTJSONMETHODS[full_name][1], value, message)(self)
    else:
        self._ConvertFieldValuePair(value, message)
Convert a JSON object into a message. Args: value: A JSON object. message: A WKT or regular protocol message to record the data. Raises: ParseError: In case of convert problems.
def update(self, series_list):
    """Update widget content from series_list.

    Rebuilds the notebook pages, one ``SeriesPanel`` per series, always
    appending a trailing "+" page. ``self.updating`` is set while pages
    are rebuilt (presumably to suppress event handlers — TODO confirm).

    Parameters
    ----------
    series_list: list of dict
        List of dicts with data from all series
    """
    if not series_list:
        self.series_notebook.AddPage(wx.Panel(self, -1), _("+"))
        return
    self.updating = True
    self.series_notebook.DeleteAllPages()
    for page, attrdict in enumerate(series_list):
        series_panel = SeriesPanel(self.grid, attrdict)
        name = "Series"
        self.series_notebook.InsertPage(page, series_panel, name)
    self.series_notebook.AddPage(wx.Panel(self, -1), _("+"))
    self.updating = False
Updates widget content from series_list Parameters ---------- series_list: List of dict \tList of dicts with data from all series
def save(self, filename):
    """Write this object's XML representation to ``filename`` as UTF-8."""
    with io.open(filename, 'w', encoding='utf-8') as handle:
        handle.write(self.xml())
Save metadata to XML file
def get_expression_engine(self, name):
    """Return the expression engine instance registered under ``name``.

    Raises:
        InvalidEngineError: when no engine with that name is registered.
    """
    try:
        engine = self.expression_engines[name]
    except KeyError:
        raise InvalidEngineError("Unsupported expression engine: {}".format(name))
    return engine
Return an expression engine instance.
def normalizeFileFormatVersion(value):
    """Normalize a font's file format version.

    * **value** must be a :ref:`type-int`.
    * Returned value will be an ``int``.
    """
    if isinstance(value, int):
        return value
    raise TypeError("File format versions must be instances of "
                    ":ref:`type-int`, not %s." % type(value).__name__)
Normalizes a font's file format version. * **value** must be a :ref:`type-int`. * Returned value will be a ``int``.
def double_click(self):
    """Perform a double click on the element.

    Scrolls the element into view first, then issues the double click
    through an ``ActionChains`` sequence. NOTE(review): upstream notes
    say this currently works only on the Chrome driver — not verifiable
    from here.
    """
    self.scroll_to()
    ActionChains(self.parent.driver).double_click(self._element).perform()
Performs a double click in the element. Currently works only on Chrome driver.
def _add_somatic_opts(opts, paired):
    """Add somatic calling options to the current option string.

    Adds ``--min-alternate-fraction`` from the tumor sample's configured
    ``min_allele_fraction`` (a percentage, default 10, converted to a
    fraction) unless a min-fraction flag is already present, then appends
    the fixed pooled/genotype-likelihood options.
    See _run_freebayes_paired for references.
    """
    if "--min-alternate-fraction" not in opts and "-F" not in opts:
        # min_allele_fraction is configured as a percentage; convert to a fraction.
        min_af = float(utils.get_in(paired.tumor_config, ("algorithm", "min_allele_fraction"), 10)) / 100.0
        opts += " --min-alternate-fraction %s" % min_af
    opts += (" --pooled-discrete --pooled-continuous "
             "--report-genotype-likelihood-max --allele-balance-priors-off")
    return opts
Add somatic options to current set. See _run_freebayes_paired for references.
def set_status(self, new_status):
    """Set the status of the report.

    :param new_status: the new status of the report; must be one of
        ``Report.allowed_statuses`` (stored lower-cased).
    :raises Exception: if ``new_status`` is not an allowed status.
    """
    if new_status not in Report.allowed_statuses:
        raise Exception('status must be one of: %s' % (', '.join(Report.allowed_statuses)))
    self._data_fields['status'] = new_status.lower()
Set the status of the report. :param new_status: the new status of the report (either PASSED, FAILED or ERROR)
def get_next_invoke_id(self, addr):
    """Return an unused invoke ID for client transactions to ``addr``.

    Cycles ``self.nextInvokeID`` through 0..255, skipping any ID already
    used by a client transaction with the same PDU address.

    :raises RuntimeError: when all 256 IDs are in use for ``addr``.
    """
    if _debug: StateMachineAccessPoint._debug("get_next_invoke_id")

    initialID = self.nextInvokeID
    while 1:
        invokeID = self.nextInvokeID
        self.nextInvokeID = (self.nextInvokeID + 1) % 256

        # full wrap-around with no free ID found
        if initialID == self.nextInvokeID:
            raise RuntimeError("no available invoke ID")

        # skip IDs in use by an existing transaction to the same address
        for tr in self.clientTransactions:
            if (invokeID == tr.invokeID) and (addr == tr.pdu_address):
                break
        else:
            break

    return invokeID
Called by clients to get an unused invoke ID.
def view_list(self):
    """Return a list of polygon index lists for the waypoints.

    Repeatedly asks ``self.view_indexes(done)`` for the next polygon's
    indexes until every waypoint has been consumed; empty results are
    skipped.

    NOTE(review): termination relies on ``view_indexes`` adding entries
    to ``done`` on every call — TODO confirm, otherwise this loop can
    spin forever.
    """
    done = set()
    ret = []
    while len(done) != self.count():
        p = self.view_indexes(done)
        if len(p) > 0:
            ret.append(p)
    return ret
Return a list of polygon index lists for the waypoints.
def token_delete(remote, token=''):
    """Remove the OAuth access token for ``remote`` from the session.

    :param remote: The remote application.
    :param token: Type of token to remove. (Default: ``''``)
        NOTE(review): currently unused — only the session key derived
        from ``remote.name`` determines what is removed.
    :returns: The removed token, or ``None`` if none was stored.
    """
    session_key = token_session_key(remote.name)
    return session.pop(session_key, None)
Remove OAuth access tokens from session. :param remote: The remote application. :param token: Type of token to get. Data passed from ``oauth.request()`` to identify which token to retrieve. (Default: ``''``) :returns: The token.
def from_dict(cls, d):
    """Instantiate a Role from a dictionary representation.

    Expects ``rargname`` and ``value`` keys; ``properties`` (a mapping,
    converted to a list of items) and ``optional`` default to empty/False.
    """
    properties = list(d.get('properties', {}).items())
    optional = d.get('optional', False)
    return cls(d['rargname'], d['value'], properties, optional)
Instantiate a Role from a dictionary representation.
def _imm_delattr(self, name):
    """Delete-attribute handler for immutable objects.

    For a persistent immutable, deleting a cached value removes it from
    the instance ``__dict__`` (invalidating the value cache); deleting
    any other attribute raises. Transient immutables delegate to
    ``_imm_trans_delattr``.

    NOTE(review): ``_imm_check(imm, [name])`` references a name ``imm``
    that is not defined in this function — it looks like it should be
    ``self``; TODO confirm upstream.
    """
    if _imm_is_persist(self):
        values = _imm_value_data(self)
        if name in values:
            dd = object.__getattribute__(self, '__dict__')
            if name in dd: del dd[name]
            if name in _imm_const_data(self): _imm_check(imm, [name])
        else:
            raise TypeError('Attempt to reset parameter \'%s\' of non-transient immutable' % name)
    else:
        return _imm_trans_delattr(self, name)
A persistent immutable's delattr allows the object's value-caches to be invalidated, otherwise raises an exception.
def GET_savedgetitemvalues(self) -> None:
    """Get the previously saved values of all |GetItem| objects.

    Falls back to ``GET_getitemvalues`` when nothing has been saved for
    this id; otherwise copies the saved name/value pairs into
    ``self._outputs``.
    """
    dict_ = state.getitemvalues.get(self._id)
    if dict_ is None:
        self.GET_getitemvalues()
    else:
        for name, value in dict_.items():
            self._outputs[name] = value
Get the previously saved values of all |GetItem| objects.
def _generate_html(data, out):
    """Write the report ``data`` to ``out`` as a minimal HTML document."""
    for tag in ('<html>', '<body>'):
        print(tag, file=out)
    _generate_html_table(data, out, 0)
    for tag in ('</body>', '</html>'):
        print(tag, file=out)
Generate report data as HTML
def transitDurationCircular(P, R_s, R_p, a, i):
    r"""Estimate the primary transit duration, assuming a circular orbit.

    .. math::
        T_\text{dur} = \frac{P}{\pi}\sin^{-1}
        \left[\frac{R_\star \sqrt{(1+k)^2 - b^2}}{a\sin{i}}\right]

    where P is the orbital period, :math:`R_\star` the stellar radius,
    a the semi-major axis, :math:`k = R_p / R_s` and
    :math:`b = (a / R_\star)\cos{i}` (Seager & Mallen-Ornelas 2003).

    :param P: orbital period (quantity)
    :param R_s: stellar radius
    :param R_p: planetary radius
    :param a: semi-major axis
    :param i: orbital inclination; defaults to 90 deg when NaN
    :return: transit duration, rescaled to minutes

    BUG FIX: the original source carried a stray ``r`` token (the
    remnant of a stripped raw docstring prefix), which made the
    function body syntactically invalid.
    """
    # NOTE(review): `i is nan` is an identity test and only catches the
    # module's own `nan` object — TODO confirm this is intended.
    if i is nan:
        i = 90 * aq.deg
    i = i.rescale(aq.rad)
    k = R_p / R_s
    b = (a * cos(i)) / R_s
    duration = (P / pi) * arcsin(((R_s * sqrt((1 + k) ** 2 - b ** 2)) / (a * sin(i))).simplified)
    return duration.rescale(aq.min)
r"""Estimation of the primary transit time. Assumes a circular orbit. .. math:: T_\text{dur} = \frac{P}{\pi}\sin^{-1} \left[\frac{R_\star}{a}\frac{\sqrt{(1+k)^2 - b^2}}{\sin{i}} \right] Where :math:`T_\text{dur}` transit duration, P orbital period, :math:`R_\star` radius of the star, a is the semi-major axis, k is :math:`\frac{R_p}{R_s}`, b is :math:`\frac{a}{R_*} \cos{i}` (Seager & Mallen-Ornelas 2003)
def timestamp(datetime_obj):
    """Return the Unix timestamp of ``datetime_obj`` as a float.

    The number of seconds that have elapsed since January 1, 1970
    (naive datetimes, compared against a naive epoch).
    """
    epoch = datetime.datetime(1970, 1, 1)
    return (datetime_obj - epoch).total_seconds()
Return Unix timestamp as float. The number of seconds that have elapsed since January 1, 1970.
def _parse_player_data(self, player_data):
    """Parse all player information and set attributes.

    Iterates through each class attribute (skipping identity/metadata
    fields that are populated elsewhere) and fills it with values parsed
    from the HTML data.

    Parameters
    ----------
    player_data : dict or str
        Either a dict mapping season strings to HTML data (``Player``
        subclasses) or a single HTML string of game statistics
        (``BoxscorePlayer`` subclasses).
    """
    for field in self.__dict__:
        # Attribute names are stored with a leading underscore.
        short_field = str(field)[1:]
        # These fields are set elsewhere; skip parsing them from HTML.
        if short_field == 'player_id' or \
           short_field == 'index' or \
           short_field == 'most_recent_season' or \
           short_field == 'name' or \
           short_field == 'weight' or \
           short_field == 'height' or \
           short_field == 'season':
            continue
        field_stats = []
        if type(player_data) == dict:
            # One parsed value per season.
            for year, data in player_data.items():
                stats = pq(data['data'])
                value = self._parse_value(stats, short_field)
                field_stats.append(value)
        else:
            stats = pq(player_data)
            value = self._parse_value(stats, short_field)
            field_stats.append(value)
        setattr(self, field, field_stats)
Parse all player information and set attributes. Iterate through each class attribute to parse the data from the HTML page and set the attribute value with the result. Parameters ---------- player_data : dictionary or string If this class is inherited from the ``Player`` class, player_data will be a dictionary where each key is a string representing the season and each value contains the HTML data as a string. If this class is inherited from the ``BoxscorePlayer`` class, player_data will be a string representing the player's game statistics in HTML format.
def separate_namespace(qname):
    """Separate the namespace from the element.

    Splits a Clark-notation qualified name (``{namespace}element``) into
    its parts.

    :param qname: qualified name string.
    :returns: ``(namespace, element_name)``; ``namespace`` is ``None``
        when ``qname`` carries no ``{namespace}`` prefix.
    """
    import re
    # BUG FIX: the original used a bare `except:` which silently swallowed
    # every error (including KeyboardInterrupt and TypeError for non-str
    # input); test the match explicitly instead.
    match = re.search(r'^{(.+)}(.+)$', qname)
    if match is None:
        return None, qname
    namespace, element_name = match.groups()
    return namespace, element_name
Separates the namespace from the element
def _extract_player_stats(self, table, player_dict, home_or_away):
    """Combine all player stats into a single object.

    Each player usually spans several rows (basic and advanced stats);
    the rows' HTML is concatenated per player so every field can be
    queried from one string.

    Parameters
    ----------
    table : PyQuery object
        A single boxscore table (e.g. home advanced or away basic stats).
    player_dict : dict
        Maps player ID -> {'name', 'data', 'team'}.
    home_or_away : str
        Constant indicating which team the player is a member of.

    Returns
    -------
    dict
        The updated ``player_dict``.
    """
    for row in table('tbody tr').items():
        player_id = self._find_player_id(row)
        # Skip rows (headers, totals) that carry no player reference.
        if not player_id:
            continue
        name = self._find_player_name(row)
        try:
            # Append this row's HTML to the player's existing blob.
            player_dict[player_id]['data'] += str(row).strip()
        except KeyError:
            player_dict[player_id] = {
                'name': name,
                'data': str(row).strip(),
                'team': home_or_away
            }
    return player_dict
Combine all player stats into a single object. Since each player generally has a couple of rows worth of stats (one for basic stats and another for advanced stats) on the boxscore page, both rows should be combined into a single string object to easily query all fields from a single object instead of determining which row to pull metrics from. Parameters ---------- table : PyQuery object A PyQuery object of a single boxscore table, such as the home team's advanced stats or the away team's basic stats. player_dict : dictionary A dictionary where each key is a string of the player's ID and each value is a dictionary where the values contain the player's name, HTML data, and a string constant indicating which team the player is a member of. home_or_away : string constant A string constant indicating whether the player plays for the home or away team. Returns ------- dictionary Returns a ``dictionary`` where each key is a string of the player's ID and each value is a dictionary where the values contain the player's name, HTML data, and a string constant indicating which team the player is a member of.
async def click(self, entity, reply_to=None, silent=False, clear_draft=False, hide_via=False):
    """Click this inline result, sending its associated message to ``entity``.

    Args:
        entity: the entity to which the message of this result should be sent.
        reply_to (int | Message, optional): ID or message the sent
            message will reply to.
        silent (bool, optional): if ``True``, sending will not notify
            the user(s).
        clear_draft (bool, optional): whether the draft should be removed
            after sending. Defaults to ``False``.
        hide_via (bool, optional): whether the "via @bot" tag should be
            hidden (only works with certain bots).
    """
    entity = await self._client.get_input_entity(entity)
    reply_id = None if reply_to is None else utils.get_message_id(reply_to)
    req = functions.messages.SendInlineBotResultRequest(
        peer=entity,
        query_id=self._query_id,
        id=self.result.id,
        silent=silent,
        clear_draft=clear_draft,
        hide_via=hide_via,
        reply_to_msg_id=reply_id
    )
    return self._client._get_response_message(
        req, await self._client(req), entity)
Clicks this result and sends the associated `message`. Args: entity (`entity`): The entity to which the message of this result should be sent. reply_to (`int` | `Message <telethon.tl.custom.message.Message>`, optional): If present, the sent message will reply to this ID or message. silent (`bool`, optional): If ``True``, the sent message will not notify the user(s). clear_draft (`bool`, optional): Whether the draft should be removed after sending the message from this result or not. Defaults to ``False``. hide_via (`bool`, optional): Whether the "via @bot" should be hidden or not. Only works with certain bots (like @bing or @gif).
def _get_esxdatacenter_proxy_details():
    """Return the running esxdatacenter proxy's connection details.

    Returns a 9-tuple: (vcenter, username, password, protocol, port,
    mechanism, principal, domain, datacenter); missing keys come back
    as ``None``.
    """
    det = __salt__['esxdatacenter.get_details']()
    return det.get('vcenter'), det.get('username'), det.get('password'), \
        det.get('protocol'), det.get('port'), det.get('mechanism'), \
        det.get('principal'), det.get('domain'), det.get('datacenter')
Returns the running esxdatacenter's proxy details
def _validate_str_list(arg):
    """Ensure ``arg`` is a list of strings.

    - bytes: decoded to unicode and wrapped in a one-element list
    - str: wrapped in a one-element list
    - non-mapping iterable: string items kept as-is, other items coerced
      with ``six.text_type``
    - anything else: coerced to text and wrapped in a one-element list
    """
    if isinstance(arg, six.binary_type):
        ret = [salt.utils.stringutils.to_unicode(arg)]
    elif isinstance(arg, six.string_types):
        ret = [arg]
    elif isinstance(arg, Iterable) and not isinstance(arg, Mapping):
        ret = []
        for item in arg:
            if isinstance(item, six.string_types):
                ret.append(item)
            else:
                ret.append(six.text_type(item))
    else:
        ret = [six.text_type(arg)]
    return ret
ensure ``arg`` is a list of strings
def fbank(signal,samplerate=16000,winlen=0.025,winstep=0.01,
          nfilt=26,nfft=512,lowfreq=0,highfreq=None,preemph=0.97,
          winfunc=lambda x:numpy.ones((x,))):
    """Compute Mel-filterbank energy features from an audio signal.

    :param signal: the audio signal to compute features from; an N*1 array
    :param samplerate: the sample rate of the signal, in Hz
    :param winlen: analysis window length in seconds (default 0.025)
    :param winstep: step between successive windows in seconds (default 0.01)
    :param nfilt: number of filters in the filterbank (default 26)
    :param nfft: FFT size (default 512)
    :param lowfreq: lowest mel filter band edge in Hz (default 0)
    :param highfreq: highest band edge in Hz (default samplerate/2)
    :param preemph: preemphasis coefficient; 0 means no filter (default 0.97)
    :param winfunc: analysis window applied to each frame (default: none,
        i.e. a rectangular window); numpy window functions work here,
        e.g. ``winfunc=numpy.hamming``
    :returns: 2 values: a (NUMFRAMES, nfilt) array of feature vectors,
        and the total (unwindowed) energy in each frame
    """
    highfreq= highfreq or samplerate/2
    signal = sigproc.preemphasis(signal,preemph)
    frames = sigproc.framesig(signal, winlen*samplerate, winstep*samplerate, winfunc)
    pspec = sigproc.powspec(frames,nfft)
    energy = numpy.sum(pspec,1)
    # Replace exact zeros so downstream logs never see log(0).
    energy = numpy.where(energy == 0,numpy.finfo(float).eps,energy)
    fb = get_filterbanks(nfilt,nfft,samplerate,lowfreq,highfreq)
    feat = numpy.dot(pspec,fb.T)
    feat = numpy.where(feat == 0,numpy.finfo(float).eps,feat)
    return feat,energy
Compute Mel-filterbank energy features from an audio signal. :param signal: the audio signal from which to compute features. Should be an N*1 array :param samplerate: the sample rate of the signal we are working with, in Hz. :param winlen: the length of the analysis window in seconds. Default is 0.025s (25 milliseconds) :param winstep: the step between successive windows in seconds. Default is 0.01s (10 milliseconds) :param nfilt: the number of filters in the filterbank, default 26. :param nfft: the FFT size. Default is 512. :param lowfreq: lowest band edge of mel filters. In Hz, default is 0. :param highfreq: highest band edge of mel filters. In Hz, default is samplerate/2 :param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97. :param winfunc: the analysis window to apply to each frame. By default no window is applied. You can use numpy window functions here e.g. winfunc=numpy.hamming :returns: 2 values. The first is a numpy array of size (NUMFRAMES by nfilt) containing features. Each row holds 1 feature vector. The second return value is the energy in each frame (total energy, unwindowed)
def _update_ret(ret, goids, go2color): if goids: ret['GOs'].update(goids) if go2color: for goid, color in go2color.items(): ret['go2color'][goid] = color
Update 'GOs' and 'go2color' in dict with goids and go2color.
def fei_metadata(self):
    """Return FEI metadata from SFEG or HELIOS tags as dict.

    Returns ``None`` for non-FEI files or when neither tag is present
    on the first page.
    """
    if not self.is_fei:
        return None
    tags = self.pages[0].tags
    for key in ('FEI_SFEG', 'FEI_HELIOS'):
        if key in tags:
            return tags[key].value
    return None
Return FEI metadata from SFEG or HELIOS tags as dict.
def formatargvalues(args, varargs, varkw, locals,
                    formatarg=str,
                    formatvarargs=lambda name: '*' + name,
                    formatvarkw=lambda name: '**' + name,
                    formatvalue=lambda value: '=' + repr(value),
                    join=joinseq):
    """Format an argument spec from the 4 values returned by getargvalues.

    The first four arguments are (args, varargs, varkw, locals). The
    next four arguments are the corresponding optional formatting
    functions that are called to turn names and values into strings.
    The ninth argument is an optional function to format the sequence
    of arguments.

    NOTE(review): uses ``string.join`` — this is Python 2 code.
    """
    def convert(name, locals=locals,
                formatarg=formatarg, formatvalue=formatvalue):
        return formatarg(name) + formatvalue(locals[name])
    specs = []
    for i in range(len(args)):
        specs.append(strseq(args[i], convert, join))
    if varargs:
        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
    if varkw:
        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
    return '(' + string.join(specs, ', ') + ')'
Format an argument spec from the 4 values returned by getargvalues. The first four arguments are (args, varargs, varkw, locals). The next four arguments are the corresponding optional formatting functions that are called to turn names and values into strings. The ninth argument is an optional function to format the sequence of arguments.
def get_storage(self, id_or_uri):
    """Get storage details of an OS Volume.

    Args:
        id_or_uri: ID or URI of the OS Volume.

    Returns:
        dict: Storage details
    """
    uri = self.URI + "/{}/storage".format(extract_id_from_uri(id_or_uri))
    return self._client.get(uri)
Get storage details of an OS Volume. Args: id_or_uri: ID or URI of the OS Volume. Returns: dict: Storage details
def random_point_triangle(triangle, use_int_coords=True):
    """Select a random point in the interior of a triangle.

    Uses the barycentric sampling formula
    ``P = (1-sqrt(r1))*A + sqrt(r1)*(1-r2)*B + sqrt(r1)*r2*C``.

    :param triangle: polygon exposing ``exterior.coords`` (presumably a
        shapely triangle — TODO confirm)
    :param use_int_coords: round the sampled point to integer coordinates
    :return: a ``Point`` inside the triangle
    """
    xs, ys = triangle.exterior.coords.xy
    # Drop the repeated closing coordinate to get the three vertices.
    A, B, C = zip(xs[:-1], ys[:-1])
    r1, r2 = np.random.rand(), np.random.rand()
    rx, ry = (1 - sqrt(r1)) * np.asarray(A) + sqrt(r1) * (1 - r2) * np.asarray(B) + sqrt(r1) * r2 * np.asarray(C)
    if use_int_coords:
        rx, ry = round(rx), round(ry)
        return Point(int(rx), int(ry))
    return Point(rx, ry)
Selects a random point in interior of a triangle
def update(self, **kwargs):
    """Update the User resource.

    Disabled on TMOS versions newer than 12.0.0 due to a password
    decryption bug — use ``modify()`` there instead.

    :raises UnsupportedOperation: on unsupported TMOS versions.
    """
    tmos_version = self._meta_data['bigip'].tmos_version
    if LooseVersion(tmos_version) > LooseVersion('12.0.0'):
        msg = "Update() is unsupported for User on version %s. " \
              "Utilize Modify() method instead" % tmos_version
        raise UnsupportedOperation(msg)
    else:
        self._update(**kwargs)
Due to a password decryption bug, update() is disabled on TMOS versions newer than 12.0.0; use modify() instead.
def free_processed_queue(self):
    """Release SDK resources held by already-processed coordinators.

    Frees each coordinator under the instance lock, then empties the
    processed list.
    """
    with self._lock:
        processed = self._processed_coordinators
        if len(processed) > 0:
            for coordinator in processed:
                coordinator.free_resources()
            self._processed_coordinators = []
call the Aspera sdk to freeup resources
def _get_conda_channels(conda_bin):
    """Retrieve default conda channels as command-line arguments.

    Checks the current conda configuration and only emits ``-c <channel>``
    for the defaults (bioconda, conda-forge) that are not already
    configured, so users can override defaults with specific mirrors in
    their .condarc.
    """
    channels = ["bioconda", "conda-forge"]
    out = []
    config = yaml.safe_load(subprocess.check_output([conda_bin, "config", "--show"]))
    for c in channels:
        present = False
        for orig_c in config.get("channels") or []:
            # Match both plain channel names and mirror URLs ending in the name.
            if orig_c.endswith((c, "%s/" % c)):
                present = True
                break
        if not present:
            out += ["-c", c]
    return out
Retrieve default conda channels, checking if they are pre-specified in config. This allows users to override defaults with specific mirrors in their .condarc
def select_uri_implementation(ecore_model_path):
    """Select the right URI implementation for the Ecore model path schema.

    Paths matching ``URL_PATTERN`` (HTTP-style URLs) get ``HttpURI``;
    anything else falls back to the plain file ``URI``.
    """
    if URL_PATTERN.match(ecore_model_path):
        return pyecore.resources.resource.HttpURI
    return pyecore.resources.URI
Select the right URI implementation regarding the Ecore model path schema.
def _from_dict(cls, _dict):
    """Initialize a Content object from a json dictionary.

    :raises ValueError: when the required ``contentItems`` key is absent.
    """
    args = {}
    if 'contentItems' in _dict:
        args['content_items'] = [
            ContentItem._from_dict(x) for x in (_dict.get('contentItems'))
        ]
    else:
        raise ValueError(
            'Required property \'contentItems\' not present in Content JSON'
        )
    return cls(**args)
Initialize a Content object from a json dictionary.
def medial_axis(self, resolution=None, clip=None):
    """Find the approximate medial axis based on a voronoi diagram of
    evenly spaced points on the boundary of the polygon.

    Parameters
    ----------
    resolution : None or float
        Distance between each sample on the polygon boundary; defaults
        to ``self.scale / 1000.0``.
    clip : None, or (2,) float
        Min, max number of samples.

    Returns
    ----------
    medial : Path2D object
        Contains only the medial axis of the Path.
    """
    if resolution is None:
        resolution = self.scale / 1000.0
    # deferred import, presumably to avoid a circular dependency — TODO confirm
    from .exchange.misc import edges_to_path
    edge_vert = [polygons.medial_axis(i, resolution, clip)
                 for i in self.polygons_full]
    medials = [Path2D(**edges_to_path(edges=e, vertices=v))
               for e, v in edge_vert]
    medial = concatenate(medials)
    return medial
Find the approximate medial axis based on a voronoi diagram of evenly spaced points on the boundary of the polygon. Parameters ---------- resolution : None or float Distance between each sample on the polygon boundary clip : None, or (2,) float Min, max number of samples Returns ---------- medial : Path2D object Contains only medial axis of Path
def get_option_as_list(self, optionname, delimiter=",", default=None):
    """Return the option split on ``delimiter`` as a list of stripped,
    non-empty strings.

    Returns ``default`` when the option cannot be split (e.g. it is not
    a string).
    """
    option = self.get_option(optionname)
    try:
        pieces = (piece.strip() for piece in option.split(delimiter))
        return [piece for piece in pieces if piece]
    except Exception:
        return default
Will try to return the option as a list separated by the delimiter.
def cached_query(qs, timeout=None):
    """Return the (cached) evaluated results of queryset ``qs``.

    The queryset is materialized with ``list`` and stored under a key
    generated from the queryset itself.

    :param qs: queryset to evaluate and cache.
    :param timeout: cache timeout forwarded to ``get_cached``.
    """
    cache_key = generate_cache_key(qs)
    # BUG FIX: the caller-supplied timeout was previously discarded
    # (the call hard-coded ``timeout=None``).
    return get_cached(cache_key, list, args=(qs,), timeout=timeout)
Auto cached queryset and generate results.
def deep_copy(item_original):
    """Return a recursive deep copy of ``item_original``.

    Every copy (including copied children) receives a fresh hex UUID in
    ``_id``; children are re-parented to their new parent and re-keyed
    by name.
    """
    item = copy.copy(item_original)
    item._id = uuid.uuid4().hex
    if hasattr(item, '_children') and len(item._children) > 0:
        rebuilt = collections.OrderedDict()
        for original_child in item._children.values():
            child = deep_copy(original_child)
            child._parent = item
            rebuilt[child.get_name()] = child
        item._children = rebuilt
    return item
Return a recursive deep-copy of item where each copy has a new ID.
def clean(self, value):
    """Validate a credit card number, stashing the detected card type
    on ``self.card_type``.

    Dashes and spaces are stripped before verification; empty values
    pass through unvalidated.

    :raises forms.ValidationError: if the card number is not valid.
    """
    if value:
        value = value.replace('-', '').replace(' ', '')
        self.card_type = verify_credit_card(value)
        if self.card_type is None:
            raise forms.ValidationError("Invalid credit card number.")
    return value
Raises a ValidationError if the card is not valid and stashes card type.
def _max_weight_operator(ops: Iterable[PauliTerm]) -> Union[None, PauliTerm]:
    """Construct a PauliTerm by taking the non-identity single-qubit
    operator at each qubit position across ``ops``.

    Returns ``None`` if the input operators do not share a natural
    tensor product basis (two operators disagree on some qubit).

    For example, the max weight operator of ["XI", "IZ"] is "XZ";
    asking for the max weight operator of ["XI", "ZI"] returns ``None``.
    """
    mapping = dict()  # qubit index -> single-qubit operator label
    for op in ops:
        for idx, op_str in op:
            if idx in mapping:
                if mapping[idx] != op_str:
                    # conflicting operators on the same qubit
                    return None
            else:
                mapping[idx] = op_str
    op = functools.reduce(mul, (PauliTerm(op, q) for q, op in mapping.items()), sI())
    return op
Construct a PauliTerm operator by taking the non-identity single-qubit operator at each qubit position. This function will return ``None`` if the input operators do not share a natural tensor product basis. For example, the max_weight_operator of ["XI", "IZ"] is "XZ". Asking for the max weight operator of something like ["XI", "ZI"] will return None.
def get_cairo_export_info(self, filetype):
    """Show the Cairo export dialog and return the chosen export info.

    Parameters
    ----------
    filetype: String in ["pdf", "svg"]
        File type for which export info is gathered

    Returns the dialog's info dict on OK; returns ``None`` implicitly
    when the dialog is cancelled. The dialog is destroyed either way.
    """
    export_dlg = CairoExportDialog(self.main_window, filetype=filetype)
    if export_dlg.ShowModal() == wx.ID_OK:
        info = export_dlg.get_info()
        export_dlg.Destroy()
        return info
    else:
        export_dlg.Destroy()
Shows Cairo export dialog and returns info Parameters ---------- filetype: String in ["pdf", "svg"] \tFile type for which export info is gathered
def _validate_rel(param, rels):
    """Validate relationship based filters.

    Nested filters (more than one ``/`` in the field) are not supported;
    a single ``/`` is allowed only when the leading segment names a
    known relationship in ``rels``.

    :raises InvalidQueryParams: for nested filters or when the field
        before ``/`` is not a relationship field.
    """
    if param.field.count('/') > 1:
        raise InvalidQueryParams(**{
            'detail': 'The filter query param of "%s" is attempting to '
                      'filter on a nested relationship which is not '
                      'currently supported.' % param,
            'links': LINK,
            'parameter': PARAM,
        })
    elif '/' in param.field:
        model_field = param.field.split('/')[0]
        if model_field not in rels:
            raise InvalidQueryParams(**{
                'detail': 'The filter query param of "%s" is attempting to '
                          'filter on a relationship but the "%s" field is '
                          'NOT a relationship field.' % (param, model_field),
                'links': LINK,
                'parameter': PARAM,
            })
Validate relationship based filters We don't support nested filters currently. FIX: Ensure the relationship filter field exists on the relationships model!
def profile_create(name, config=None, devices=None, description=None,
                   remote_addr=None, cert=None, key=None, verify_cert=True):
    """Create an LXD profile.

    name:
        The name of the profile to create.
    config:
        A config dict (or a list of ``{'key': ..., 'value': ...}`` dicts)
        or None (None = unset).
    devices:
        A device dict or None (None = unset).
    description:
        A description string or None (None = unset).
    remote_addr / cert / key / verify_cert:
        Remote LXD server URL and PEM-formatted SSL client certificate
        and key; ``verify_cert`` defaults to True (LXD normally uses
        self-signed certificates).

    See the lxd-docs REST API documentation for the config and devices
    dict details:
    https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-10

    CLI Example:

    .. code-block:: bash

        $ salt '*' lxd.profile_create autostart config="{boot.autostart: 1}"

    :raises CommandExecutionError: when the LXD API rejects the request.
    """
    client = pylxd_client_get(remote_addr, cert, key, verify_cert)
    config, devices = normalize_input_values(
        config, devices
    )
    try:
        profile = client.profiles.create(name, config, devices)
    except pylxd.exceptions.LXDAPIException as e:
        raise CommandExecutionError(six.text_type(e))
    if description is not None:
        profile.description = description
        pylxd_save_object(profile)
    return _pylxd_model_to_dict(profile)
Creates a profile. name : The name of the profile to get. config : A config dict or None (None = unset). Can also be a list: [{'key': 'boot.autostart', 'value': 1}, {'key': 'security.privileged', 'value': '1'}] devices : A device dict or None (None = unset). description : A description string or None (None = unset). remote_addr : An URL to a remote Server, you also have to give cert and key if you provide remote_addr and its a TCP Address! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Certificate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Wherever to verify the cert, this is by default True but in the most cases you want to set it off as LXD normaly uses self-signed certificates. CLI Examples: .. code-block:: bash $ salt '*' lxd.profile_create autostart config="{boot.autostart: 1, boot.autostart.delay: 2, boot.autostart.priority: 1}" $ salt '*' lxd.profile_create shared_mounts devices="{shared_mount: {type: 'disk', source: '/home/shared', path: '/home/shared'}}" See the `lxd-docs`_ for the details about the config and devices dicts. .. _lxd-docs: https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-10
def get(self, obj, **kwargs):
    """Get an attribute from a value.

    When ``self.getter`` is callable it is invoked directly with ``obj``
    and a context filtered to its argspec; otherwise it must be a
    dot-separated accessor string walked via dict keys or attributes,
    with a callable endpoint invoked with no arguments.

    :param obj: Object to get the attribute value from.
    :return: Value of object's attribute.
    """
    assert self.getter is not None, "Getter accessor is not specified."
    if callable(self.getter):
        return self.getter(obj, **_get_context(self._getter_argspec, kwargs))
    assert isinstance(self.getter, string_types), "Accessor must be a function or a dot-separated string."
    for attr in self.getter.split("."):
        if isinstance(obj, dict):
            obj = obj[attr]
        else:
            obj = getattr(obj, attr)
    if callable(obj):
        return obj()
    return obj
Get an attribute from a value. :param obj: Object to get the attribute value from. :return: Value of object's attribute.
def add_network(self, network, netmask, area=0):
    """Add a network to be advertised by OSPF.

    Args:
        network (str): the network to advertise, dotted-decimal notation
        netmask (str): the netmask to configure
        area: the OSPF area the network belongs to (default 0)

    Returns:
        The result of ``configure_ospf`` for the generated command.

    Raises:
        ValueError: if ``network`` or ``netmask`` is the empty string.
    """
    if network == '' or netmask == '':
        raise ValueError('network and mask values may not be empty')
    command = 'network {}/{} area {}'.format(network, netmask, area)
    return self.configure_ospf(command)
Adds a network to be advertised by OSPF Args: network (str): The network to be advertised in dotted decimal notation netmask (str): The netmask to configure area (str): The area the network belongs to. By default this value is 0 Returns: bool: True if the command completes successfully Exception: ValueError: This will get raised if network or netmask are not passed to the method
def deck_issue_mode(proto: DeckSpawnProto) -> Iterable[str]:
    """Interpret the deck's issue-mode bitflag.

    Yields the name of every mode in ``proto.MODE`` whose bit is set in
    ``proto.issue_mode``; yields only ``"NONE"`` when no bits are set.
    """
    issue_mode = proto.issue_mode
    if issue_mode == 0:
        yield "NONE"
        return
    for name, bit in proto.MODE.items():
        if bit <= issue_mode and bit & issue_mode:
            yield name
Interpret the deck's issue-mode bitflag, yielding the names of all set modes ("NONE" when no bits are set).
def _extract_specs_dependencies(specs):
    """Extract type dependencies from ``specs`` for every exported
    function and capsule signature.

    Returns the dependencies sorted so entries containing "include"
    come first.
    """
    deps = set()
    for overloads in specs.functions.values():
        for signature in overloads:
            for pytype in signature:
                deps.update(pytype_to_deps(pytype))
    for signature in specs.capsules.values():
        for pytype in signature:
            deps.update(pytype_to_deps(pytype))
    # False sorts before True, so "include" deps lead the list.
    return sorted(deps, key=lambda dep: "include" not in dep)
Extract types dependencies from specs for each exported signature.
def get_attribute_from_config(config, section, attribute):
    """Try to parse an attribute of the config file.

    Args:
        config (defaultdict): A defaultdict.
        section (str): The section of the config file to get information from.
        attribute (str): The attribute of the section to fetch.

    Returns:
        str: The string corresponding to the section and attribute.

    Raises:
        ConfigurationError: when the section or attribute is missing.
    """
    # BUG FIX: the original rebound ``section`` to the looked-up section
    # dict, so the error message printed the dict (or ``None``) instead
    # of the section *name*.
    section_data = config.get(section)
    if section_data:
        option = section_data.get(attribute)
        if option:
            return option
    raise ConfigurationError("Config file badly formed!\n"
                             "Failed to get attribute '{}' from section '{}'!"
                             .format(attribute, section))
Try to parse an attribute of the config file. Args: config (defaultdict): A defaultdict. section (str): The section of the config file to get information from. attribute (str): The attribute of the section to fetch. Returns: str: The string corresponding to the section and attribute. Raises: ConfigurationError
def pin_add(self, path, *paths, **kwargs):
    """Pin objects to local storage.

    Stores an IPFS object(s) from a given path locally to disk.

    .. code-block:: python

        >>> c.pin_add("QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d")
        {'Pins': ['QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d']}

    Parameters
    ----------
    path : str
        Path to object(s) to be pinned
    recursive : bool
        Recursively pin the objects linked to by the specified object(s)

    Returns
    -------
    dict : List of IPFS objects that have been pinned
    """
    # Python 2 compatible handling of the keyword-only 'recursive' option.
    if "recursive" in kwargs:
        kwargs.setdefault("opts", {"recursive": kwargs.pop("recursive")})
    args = (path,) + paths
    return self._client.request('/pin/add', args, decoder='json', **kwargs)
Pins objects to local storage. Stores an IPFS object(s) from a given path locally to disk. .. code-block:: python >>> c.pin_add("QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d") {'Pins': ['QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d']} Parameters ---------- path : str Path to object(s) to be pinned recursive : bool Recursively pin the objects linked to by the specified object(s) Returns ------- dict : List of IPFS objects that have been pinned
def _convert_datetime_str(response):
    """Return a copy of ``response`` where every date/datetime value is
    rendered as a string; ``None`` when ``response`` is empty/falsy."""
    if not response:
        return None
    converted = {}
    for key, value in six.iteritems(response):
        if isinstance(value, datetime.date):
            converted[key] = '{0}'.format(value)
        else:
            converted[key] = value
    return converted
modify any key-value pair where value is a datetime object to a string.
def marching_cubes(self):
    """A marching cubes Trimesh representation of the voxels.

    No effort was made to clean or smooth the result in any way; it is
    merely ``matrix_to_marching_cubes`` applied to ``self.matrix`` with
    this object's pitch and origin.

    Returns
    ---------
    meshed: Trimesh object representing the current voxel object, as
        returned by the marching cubes algorithm.
    """
    meshed = matrix_to_marching_cubes(matrix=self.matrix,
                                      pitch=self.pitch,
                                      origin=self.origin)
    return meshed
A marching cubes Trimesh representation of the voxels. No effort was made to clean or smooth the result in any way; it is merely the result of applying the scikit-image measure.marching_cubes function to self.matrix. Returns --------- meshed: Trimesh object representing the current voxel object, as returned by marching cubes algorithm.
def cross_product_compare(start, candidate1, candidate2):
    """Compare two relative changes by their cross-product.

    A way to determine which candidate vector is more "inside" relative
    to ``start`` (helper for the simple convex hull routine).

    Args:
        start (numpy.ndarray): start vector (1D, 2 elements).
        candidate1 (numpy.ndarray): first candidate (1D, 2 elements).
        candidate2 (numpy.ndarray): second candidate (1D, 2 elements).

    Returns:
        float: cross product of ``candidate1 - start`` and
        ``candidate2 - start``.
    """
    return cross_product(candidate1 - start, candidate2 - start)
Compare two relative changes by their cross-product. This is meant to be a way to determine which vector is more "inside" relative to ``start``. .. note:: This is a helper for :func:`_simple_convex_hull`. Args: start (numpy.ndarray): The start vector (as 1D NumPy array with 2 elements). candidate1 (numpy.ndarray): The first candidate vector (as 1D NumPy array with 2 elements). candidate2 (numpy.ndarray): The second candidate vector (as 1D NumPy array with 2 elements). Returns: float: The cross product of the two differences.
async def stop(self, **kwargs):
    """Stop the pairing server and unpublish the service."""
    _LOGGER.debug('Shutting down pairing server')
    if self._web_server is not None:
        await self._web_server.shutdown()
    if self._server is not None:
        # BUG FIX: close() was previously called unconditionally, raising
        # AttributeError when no server had been started (_server is None).
        self._server.close()
        await self._server.wait_closed()
Stop pairing server and unpublish service.
def _combined_grouping_values(grouping_name,collection_a,collection_b): new_grouping= collection_a.groupings.get(grouping_name,{}).copy() new_grouping.update(collection_b.groupings.get(grouping_name,{})) return new_grouping
returns a dict with values from both collections for a given grouping name Warning: collection_b overrides collection_a if there is a group_key conflict
def import_localities(path, delimiter=';'):
    """Import localities from a CSV file.

    Rows must carry ``id``, ``longitude`` and ``latitude`` columns (plus
    any other Locality fields); each row is upserted via
    ``update_or_create`` inside a single atomic transaction.

    :param path: Path to the CSV file containing the localities.
    :param delimiter: CSV field delimiter (default ``';'``).
    :returns: ``(creates, updates)`` lists of Locality objects.
    """
    creates = []
    updates = []
    with open(path, mode="r") as infile:
        reader = csv.DictReader(infile, delimiter=str(delimiter))
        with atomic():
            for row in reader:
                row['point'] = Point(float(row['longitude']), float(row['latitude']))
                locality, created = Locality.objects.update_or_create(
                    id=row['id'], defaults=row
                )
                if created:
                    creates.append(locality)
                else:
                    updates.append(locality)
    return creates, updates
Import localities from a CSV file. :param path: Path to the CSV file containing the localities.
def merge_graphs(main_graph, addition_graph):
    """Merge ``addition_graph`` into ``main_graph``.

    Every node and edge of the addition graph is recreated in the main
    graph; edges are rewired through the freshly created node ids.

    Returns a tuple ``(node_mapping, edge_mapping)`` translating old
    node/edge ids to the newly created ids.
    """
    node_mapping = {}
    for node in addition_graph.get_all_node_objects():
        node_mapping[node['id']] = main_graph.new_node()

    edge_mapping = {}
    for edge in addition_graph.get_all_edge_objects():
        vertex_a, vertex_b = edge['vertices']
        edge_mapping[edge['id']] = main_graph.new_edge(
            node_mapping[vertex_a], node_mapping[vertex_b])

    return node_mapping, edge_mapping
Merges an ''addition_graph'' into the ''main_graph''. Returns a tuple of dictionaries, mapping old node ids and edge ids to new ids.
def generate_legacy_webfinger(template=None, *args, **kwargs):
    """Generate a legacy webfinger XRD document.

    Template specific key-value pairs need to be passed as ``kwargs``,
    see the webfinger classes.

    :arg template: Ready template to fill with args, for example
        "diaspora" (optional); anything else falls back to the base
        legacy webfinger.
    :returns: Rendered XRD document (str)
    """
    if template == "diaspora":
        webfinger = DiasporaWebFinger(*args, **kwargs)
    else:
        webfinger = BaseLegacyWebFinger(*args, **kwargs)
    return webfinger.render()
Generate a legacy webfinger XRD document. Template specific key-value pairs need to be passed as ``kwargs``, see classes. :arg template: Ready template to fill with args, for example "diaspora" (optional) :returns: Rendered XRD document (str)
def Acf(poly, dist, N=None, **kws):
    """Auto-correlation function.

    Args:
        poly (Poly): Polynomial of interest; must have ``len(poly) > N``.
        dist (Dist): Defines the space the correlation is taken on.
        N (int): The number of time steps apart included. If omitted,
            set to ``len(poly)//2 + 1``.

    Returns:
        (numpy.ndarray): Auto-correlation of ``poly`` with shape
        ``(N,)``; by definition ``Q[0] == 1``.
    """
    if N is None:
        # BUG FIX: use floor division -- ``len(poly)/2 + 1`` yields a
        # float on Python 3, breaking ``numpy.empty(N)``/``range(N)``.
        N = len(poly) // 2 + 1
    corr = Corr(poly, dist, **kws)
    out = numpy.empty(N)
    for n in range(N):
        out[n] = numpy.mean(corr.diagonal(n), 0)
    return out
Auto-correlation function. Args: poly (Poly): Polynomial of interest. Must have ``len(poly) > N``. dist (Dist): Defines the space the correlation is taken on. N (int): The number of time steps appart included. If omited set to ``len(poly)/2+1``. Returns: (numpy.ndarray) : Auto-correlation of ``poly`` with shape ``(N,)``. Note that by definition ``Q[0]=1``. Examples: >>> poly = chaospy.prange(10)[1:] >>> Z = chaospy.Uniform() >>> print(numpy.around(chaospy.Acf(poly, Z, 5), 4)) [1. 0.9915 0.9722 0.9457 0.9127]
def new_registry_ont_id_transaction(self, ont_id: str, pub_key: str or bytes, b58_payer_address: str,
                                    gas_limit: int, gas_price: int) -> Transaction:
    """Generate a Transaction object used to register an ONT ID.

    :param ont_id: OntId.
    :param pub_key: the hexadecimal public key as a string, or raw bytes.
    :param b58_payer_address: a base58 encoded address that pays for the
        transaction.
    :param gas_limit: an int value that indicates the gas limit.
    :param gas_price: an int value that indicates the gas price.
    :return: a Transaction object which is used to register the ONT ID.
    :raises SDKException: when ``pub_key`` is neither str nor bytes.
    """
    if isinstance(pub_key, str):
        bytes_ctrl_pub_key = bytes.fromhex(pub_key)
    elif isinstance(pub_key, bytes):
        bytes_ctrl_pub_key = pub_key
    else:
        raise SDKException(ErrorCode.param_err('a bytes or str type of public key is required.'))
    args = dict(ontid=ont_id.encode('utf-8'), ctrl_pk=bytes_ctrl_pub_key)
    tx = self.__generate_transaction('regIDWithPublicKey', args, b58_payer_address, gas_limit, gas_price)
    return tx
This interface is used to generate a Transaction object which is used to register ONT ID. :param ont_id: OntId. :param pub_key: the hexadecimal public key in the form of string. :param b58_payer_address: a base58 encode address which indicate who will pay for the transaction. :param gas_limit: an int value that indicate the gas limit. :param gas_price: an int value that indicate the gas price. :return: a Transaction object which is used to register ONT ID.
def scrap(self, url=None, scheme=None, timeout=None, html_parser=None, cache_ext=None):
    """Scrap a url and parse the content according to scheme.

    :param url: Url to parse (default: self.url)
    :param scheme: Scheme to apply to the html (default: self.scheme)
    :param timeout: Timeout for the http operation (default: self.timeout)
    :param html_parser: Which html parser to use (default: self.html_parser)
    :param cache_ext: External cache info
    :return: Response data from url plus parsed info
    :raises WEBParameterException: Missing scheme or url
    """
    # fall back to instance defaults for any unset (falsy) argument
    url = url or self.url
    scheme = scheme or self.scheme
    timeout = timeout or self.timeout
    html_parser = html_parser or self.html_parser
    if not scheme:
        raise WEBParameterException("Missing scheme definition")
    if not url:
        raise WEBParameterException("Missing url definition")
    resp = self.get(url, timeout, cache_ext=cache_ext)
    soup = BeautifulSoup(resp.html, html_parser)
    resp.scraped = self._parse_scheme(soup, scheme)
    return resp
Scrap a url and parse the content according to scheme :param url: Url to parse (default: self._url) :type url: str :param scheme: Scheme to apply to html (default: self._scheme) :type scheme: dict :param timeout: Timeout for http operation (default: self._timout) :type timeout: float :param html_parser: What html parser to use (default: self._html_parser) :type html_parser: str | unicode :param cache_ext: External cache info :type cache_ext: floscraper.models.CacheInfo :return: Response data from url and parsed info :rtype: floscraper.models.Response :raises WEBConnectException: HTTP get failed :raises WEBParameterException: Missing scheme or url
def example_generator(self, encoder, tmp_dir, task_id):
    """Generator for examples.

    Args:
        encoder: a TextEncoder
        tmp_dir: a string
        task_id: an integer

    Yields:
        feature dictionaries with a "targets" token list of
        length ``self.sequence_length``
    """
    filepaths = self.text_filepaths_for_task(tmp_dir, task_id)
    if task_id >= self.num_train_shards:
        # dev shards: cap how many characters are read from each file
        max_chars_per_file = self.max_dev_chars // (
            self.num_dev_shards * len(filepaths))
    else:
        max_chars_per_file = None
    tokens = []
    for ftext in self.file_generator(
            filepaths, max_chars_per_file=max_chars_per_file):
        tokens.extend(encoder.encode(ftext))
        pos = 0
        # emit every complete sequence_length window accumulated so far
        while pos + self.sequence_length <= len(tokens):
            yield {"targets": tokens[pos:pos + self.sequence_length]}
            pos += self.sequence_length
        if pos > 0:
            # keep only the unconsumed tail for the next file
            tokens = tokens[pos:]
    if self.remainder_policy == "pad":
        if tokens:
            # zero-pad the final partial sequence up to full length
            targets = tokens + [0] * (self.sequence_length - len(tokens))
            yield {"targets": targets}
    else:
        assert self.remainder_policy == "drop"
Generator for examples. Args: encoder: a TextEncoder tmp_dir: a string task_id: an integer Yields: feature dictionaries
def setup_data(self, data, params):
    """Verify that the required aesthetics are present, then return data."""
    check_required_aesthetics(
        self.REQUIRED_AES,
        data.columns,
        self.__class__.__name__,
    )
    return data
Verify & return data
def finalize_env(env):
    """Produce a platform specific env for passing into the subprocess.Popen
    family of external process calling methods.

    The supplied ``env`` is applied on top of a whitelist of inherited
    environment variables. Returns a new dict; neither input is mutated.
    """
    # Copy before appending: the original appended 'PATH' directly onto the
    # list stored in _PLATFORM_ENV_KEYS, mutating the shared module constant.
    keys = list(_PLATFORM_ENV_KEYS.get(sys.platform, []))
    if 'PATH' not in keys:
        keys.append('PATH')
    results = {key: os.environ.get(key, '') for key in keys}
    results.update(env)
    return results
Produce a platform specific env for passing into subprocess.Popen family of external process calling methods, and the supplied env will be updated on top of it. Returns a new env.
def symmetric_difference_update(self, other):
    r"""Update the multiset to contain only elements in either this multiset
    or the other, but not both.

    The resulting multiplicity of each element is the absolute difference
    of its multiplicities in the two multisets.

    Args:
        other: The other set to take the symmetric difference with. Any
            iterable or mapping of counts is converted to a Multiset first.
    """
    other = self._as_multiset(other)
    all_elements = set(self.distinct_elements()) | set(other.distinct_elements())
    for element in all_elements:
        # |a - b| is exactly the branchy max/min difference of the original
        self[element] = abs(self[element] - other[element])
r"""Update the multiset to contain only elements in either this multiset or the other but not both. >>> ms = Multiset('aab') >>> ms.symmetric_difference_update('abc') >>> sorted(ms) ['a', 'c'] You can also use the ``^=`` operator for the same effect. However, the operator version will only accept a set as other operator, not any iterable, to avoid errors. >>> ms = Multiset('aabbbc') >>> ms ^= Multiset('abd') >>> sorted(ms) ['a', 'b', 'b', 'c', 'd'] For a variant of the operation which does not modify the multiset, but returns a new multiset instead see :meth:`symmetric_difference`. Args: other: The other set to take the symmetric difference with. Can also be any :class:`~typing.Iterable`\[~T] or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
def best_precursor(clus, loci):
    """Select the best precursor, assuming a size around 100 nt.

    Precursors are pre-sorted; the first locus longer than 70 nt whose
    score is within 80% of the top score is moved to the front.
    """
    candidates = sort_precursor(clus, loci)
    score_floor = candidates[0][5] * 0.8
    chosen = 0
    for index, locus in enumerate(candidates):
        if locus[3] - locus[2] > 70 and locus[5] > score_floor:
            chosen = index
            break
    # promote the chosen locus to the head of the list
    candidates.insert(0, candidates.pop(chosen))
    return candidates
Select the best precursor, assuming a size around 100 nt
def hgetall(self, key):
    """Return all fields and values of the hash stored at `key`.

    The underlying redis ``HGETALL`` command returns a flat array of
    field/value pairs; this converts it to a :class:`dict` (empty when
    the key is not found).

    :param key: The key of the hash
    :returns: a :class:`dict` of field-to-value mappings
    """
    def _pairs_to_dict(flat):
        # consecutive items pair up: field0, value0, field1, value1, ...
        it = iter(flat)
        return dict(zip(it, it))

    return self._execute([b'HGETALL', key], format_callback=_pairs_to_dict)
Returns all fields and values of the hash stored at `key`. The underlying redis `HGETALL`_ command returns an array of pairs. This method converts that to a Python :class:`dict`. It will return an empty :class:`dict` when the key is not found. .. note:: **Time complexity**: ``O(N)`` where ``N`` is the size of the hash. :param key: The key of the hash :type key: :class:`str`, :class:`bytes` :returns: a :class:`dict` of key to value mappings for all fields in the hash .. _HGETALL: http://redis.io/commands/hgetall
def set_rule(self, name, properties):
    """Register a rule as an object attribute.

    Arguments:
        name (string): Rule name, used as the attribute name.
        properties (dict): Dictionary of properties.
    """
    setattr(self, name, properties)
    self._rule_attrs.append(name)
Set a rule as an object attribute. Arguments: name (string): Rule name to set as attribute name. properties (dict): Dictionary of properties.
def is_ignored(resource):
    """Check if the resource's URL belongs to LINKCHECKING_IGNORE_DOMAINS.

    Resources without a URL are always treated as ignored.
    """
    ignored = current_app.config['LINKCHECKING_IGNORE_DOMAINS']
    url = resource.url
    if not url:
        return True
    return urlparse(url).netloc in ignored
Check if the resource's URL is part of LINKCHECKING_IGNORE_DOMAINS
def pre_process_method_headers(method, headers):
    """Normalise an HTTP method and headers to WSGI conventions.

    Returns the lowered method, plus headers that are upper-cased,
    ``-`` replaced by ``_``, and prefixed with ``HTTP_`` unless they are
    native WSGI variables.
    """
    # WSGI variables that must not receive the HTTP_ prefix
    wsgi_native = {
        "content_length", "content_type", "query_string", "remote_addr",
        "remote_host", "remote_user", "request_method", "server_name",
        "server_port",
    }
    transformed = {}
    for name, value in headers.items():
        key = name.replace("-", "_")
        if key.lower() not in wsgi_native:
            key = "http_{header}".format(header=key)
        transformed[key.upper()] = value
    return method.lower(), transformed
Returns the lowered method. Capitalize headers, prepend HTTP_ and change - to _.
def dump_all(documents, stream=None, Dumper=Dumper, default_style=None, default_flow_style=None, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding='utf-8', explicit_start=None, explicit_end=None, version=None, tags=None):
    """Serialize a sequence of Python objects into a YAML stream.

    If stream is None, return the produced string instead.
    """
    return_value = None
    if stream is None:
        # NOTE(review): StringIO/cStringIO are Python 2 modules; this
        # in-memory path will not import on Python 3 — confirm target version.
        if encoding is None:
            from StringIO import StringIO
        else:
            from cStringIO import StringIO
        stream = StringIO()
        return_value = stream.getvalue
    dumper = Dumper(stream, default_style=default_style,
                    default_flow_style=default_flow_style,
                    canonical=canonical, indent=indent, width=width,
                    allow_unicode=allow_unicode, line_break=line_break,
                    encoding=encoding, version=version, tags=tags,
                    explicit_start=explicit_start, explicit_end=explicit_end)
    try:
        dumper.open()
        for document in documents:
            dumper.represent(document)
        dumper.close()
    finally:
        # always release dumper resources, even if serialization failed
        dumper.dispose()
    if return_value is not None:
        return return_value()
Serialize a sequence of Python objects into a YAML stream. If stream is None, return the produced string instead.
def characterSet(self, charset: str) -> None:
    """Set the character set of this document.

    Reuses the existing charset <meta> node when present, otherwise
    creates a new one under <head>.
    """
    target = self._find_charset_node()
    if not target:
        target = Meta(parent=self.head)
    target.setAttribute('charset', charset)
Set character set of this document.
def parse_line(self, line: str) -> None:
    """Update the headers with a single raw header line.

    Lines starting with whitespace are continuations of the previous
    header (per RFC 7230 obs-fold); others must be ``Name: value``.

    >>> h = HTTPHeaders()
    >>> h.parse_line("Content-Type: text/html")
    >>> h.get('content-type')
    'text/html'
    """
    if not line[0].isspace():
        try:
            name, value = line.split(":", 1)
        except ValueError:
            raise HTTPInputError("no colon in header line")
        self.add(name, value.strip())
        return
    # continuation line: extend the value of the last header seen
    if self._last_key is None:
        raise HTTPInputError("first header line cannot start with whitespace")
    continuation = " " + line.lstrip()
    self._as_list[self._last_key][-1] += continuation
    self._dict[self._last_key] += continuation
Updates the dictionary with a single header line. >>> h = HTTPHeaders() >>> h.parse_line("Content-Type: text/html") >>> h.get('content-type') 'text/html'
def amdf(lag, size):
    """Average Magnitude Difference Function non-linear filter.

    Parameters
    ----------
    lag :
        Time lag, in samples (see ``freq2lag`` for frequency conversion).
    size :
        Moving average size.

    Returns
    -------
    A callable taking a signal ``sig`` and an initial memory ``zero``,
    returning a Stream with no decimation applied.
    """
    diff_filter = (1 - z ** -lag).linearize()

    @tostream
    def amdf_filter(sig, zero=0.):
        # moving average of the absolute lag-difference
        return maverage(size)(abs(diff_filter(sig, zero=zero)), zero=zero)

    return amdf_filter
Average Magnitude Difference Function non-linear filter for a given size and a fixed lag. Parameters ---------- lag : Time lag, in samples. See ``freq2lag`` if needs conversion from frequency values. size : Moving average size. Returns ------- A callable that accepts two parameters: a signal ``sig`` and the starting memory element ``zero`` that behaves like the ``LinearFilter.__call__`` arguments. The output from that callable is a Stream instance, and has no decimation applied. See Also -------- freq2lag : Frequency (in rad/sample) to lag (in samples) converter.
def addcommenttocommit(self, project_id, author, sha, path, line, note):
    """Add an inline comment to a specific commit.

    :param project_id: project id
    :param author: the author info as returned by create mergerequest
    :param sha: branch/tag name or commit sha
    :param path: the file path
    :param line: the line number
    :param note: text of the comment
    :return: True on success (HTTP 201), otherwise False
    """
    payload = {
        'author': author,
        'note': note,
        'path': path,
        'line': line,
        'line_type': 'new',
    }
    url = '{0}/{1}/repository/commits/{2}/comments'.format(self.projects_url, project_id, sha)
    response = requests.post(url, headers=self.headers, data=payload, verify=self.verify_ssl)
    return response.status_code == 201
Adds an inline comment to a specific commit :param project_id: project id :param author: The author info as returned by create mergerequest :param sha: The name of a repository branch or tag or if not given the default branch :param path: The file path :param line: The line number :param note: Text of comment :return: True or False
def _reverse_index(self): if self.y == 0: self.display = [u" " * self.size[1]] + self.display[:-1] else: self.y -= 1
Move the cursor up one row in the same column. If the cursor is at the first row, create a new row at the top.
def tables(self):
    """Yield table names from the database."""
    with self.conn.cursor() as cur:
        cur.execute(self.TABLES_QUERY)
        yield from cur
Yields table names.
def union(self, other):
    """Return a new DStream unifying another DStream's data with this one.

    @param other: Another DStream with the same slide duration as this DStream.
    """
    if self._slideDuration != other._slideDuration:
        raise ValueError("the two DStream should have same slide duration")
    return self.transformWith(lambda left, right: left.union(right), other, True)
Return a new DStream by unifying data of another DStream with this DStream. @param other: Another DStream having the same interval (i.e., slideDuration) as this DStream.
def next(self):
    """Provide iteration capabilities, backed by a small result cache.

    Refills the cache from the API when empty; raises StopIteration once
    no more results are available.
    """
    if not self._cache:
        fetched = self._get_results()
        self._cache = fetched
        self._retrieved += len(fetched)
    if not self._cache:
        raise StopIteration()
    return self._cache.pop(0)
Provide iteration capabilities Use a small object cache for performance
def manage_request_types_view(request):
    """Manage requests: list request types with links to edit or add them.

    Restricted to presidents and superadmins.
    """
    context = {
        'page_name': "Admin - Manage Request Types",
        'request_types': RequestType.objects.all(),
    }
    return render_to_response('manage_request_types.html', context,
                              context_instance=RequestContext(request))
Manage requests. Display a list of request types with links to edit them. Also display a link to add a new request type. Restricted to presidents and superadmins.
def run(self, stop):
    """Run the pipeline.

    :param stop: Stop event passed through to each stage.
    """
    _LOGGER.info("Starting a new pipeline on group %s", self._group)
    self._group.bridge.incr_active()
    for index, stage in enumerate(self._pipe):
        self._execute_stage(index, stage, stop)
    _LOGGER.info("Finished pipeline on group %s", self._group)
    self._group.bridge.decr_active()
Run the pipeline. :param stop: Stop event
def via_scan():
    """Discover Hue bridges by HTTP-scanning the local /24 networks.

    :returns: dict mapping bridge serial to bridge info
    :raises DiscoveryError: when the scan finds nothing
    """
    import socket
    import ipaddress
    import httpfind

    candidates = []
    for host in socket.gethostbyname_ex(socket.gethostname())[2]:
        network = ipaddress.ip_interface(host + '/24').network
        candidates += httpfind.survey(
            network, path='description.xml', pattern='(P|p)hilips')
        logger.info('Scan on %s', host)
    logger.info('Scan returned %d Hue bridges(s).', len(candidates))
    found = {}
    for bridge in candidates:
        serial, bridge_info = parse_description_xml(bridge)
        if serial:
            found[serial] = bridge_info
    logger.debug('%s', found)
    if not found:
        raise DiscoveryError('Scan returned nothing')
    return found
IP scan - now implemented
def freeze(self, tmp_dir):
    """Copy each of this item's secrets into the freeze staging directory.

    :raises aomi_excep.IceFile: when a declared secret file is missing.
    """
    for sfile in self.secrets():
        src = hard_path(sfile, self.opt.secrets)
        if not os.path.exists(src):
            raise aomi_excep.IceFile("%s secret not found at %s" % (self, src))
        dest = "%s/%s" % (tmp_dir, sfile)
        dest_dir = os.path.dirname(dest)
        if not os.path.isdir(dest_dir):
            # secrets stay private: directory is owner-only
            os.mkdir(dest_dir, 0o700)
        shutil.copy(src, dest)
        LOG.debug("Froze %s %s", self, sfile)
Copies a secret into a particular location
def _delegate_required(self, path): fs = self._delegate(path) if fs is None: raise errors.ResourceNotFound(path) return fs
Check that there is a filesystem with the given ``path``.
def Define_TreeTable(self, heads, heads2=None):
    """Define a TreeTable with a heading row and optionally a second heading row.

    The first two entries of each heads list are metadata (key, color);
    the remainder are the displayed column titles.
    """
    self.tree_table = TreeTable()
    self.tree_table.append_from_list([tuple(heads[2:])], fill_title=True)
    if heads2 is not None:
        background = heads2[1]
        row_widget = gui.TableRow()
        for field in heads2[2:]:
            cell = gui.TableItem(text=field, style={'background-color': background})
            row_widget.append(cell, field)
        self.tree_table.append(row_widget, heads2[0])
    self.wid.append(self.tree_table)
Define a TreeTable with a heading row and optionally a second heading row.
def is_translocated(graph: BELGraph, node: BaseEntity) -> bool:
    """Return true if, over any of the node's edges, it is translocated."""
    return _node_has_modifier(graph, node, TRANSLOCATION)
Return true if over any of the node's edges, it is translocated.
def DeserializeUnsigned(self, reader):
    """Deserialize unsigned data only.

    Args:
        reader (neo.IO.BinaryReader): stream positioned at the start of the
            unsigned header fields.
    """
    # Fields are read sequentially; this order defines the wire format
    # and must not be changed.
    self.Version = reader.ReadUInt32()
    self.PrevHash = reader.ReadUInt256()
    self.MerkleRoot = reader.ReadUInt256()
    self.Timestamp = reader.ReadUInt32()
    self.Index = reader.ReadUInt32()
    self.ConsensusData = reader.ReadUInt64()
    self.NextConsensus = reader.ReadUInt160()
Deserialize unsigned data only. Args: reader (neo.IO.BinaryReader):
def _on_github_request(self, future, response):
    """Handle a GitHub API response.

    Decodes the JSON body and resolves ``future`` with the content, or
    sets an exception when decoding fails or GitHub reports an error.
    """
    try:
        content = escape.json_decode(response.body)
    except ValueError:
        future.set_exception(Exception('Github error: %s' % response.body))
        return
    if 'error' in content:
        future.set_exception(Exception('Github error: %s' % str(content['error'])))
        return
    future.set_result(content)
Invoked as a response to the GitHub API request. Will decode the response and set the result for the future to return the callback or raise an exception
def dry_run(self):
    """Print out a dry run of what we would have upgraded.

    :returns: u'inited' when the database would be initialized,
        u'migrated' when pending migrations would be applied,
        None when nothing would change.
    """
    if self.database_current_migration is None:
        self.printer(
            u'~> Woulda initialized: %s\n' % self.name_for_printing())
        return u'inited'
    migrations_to_run = self.migrations_to_run()
    if migrations_to_run:
        self.printer(
            u'~> Woulda updated %s:\n' % self.name_for_printing())
        # Iterate the already-computed collection. The original called it
        # again (``migrations_to_run()``), which raises TypeError on a list.
        for migration_number, migration_func in migrations_to_run:
            self.printer(
                u' + Would update %s, "%s"\n' % (
                    migration_number, migration_func.func_name))
        return u'migrated'
Print out a dry run of what we would have upgraded.
def _create_worker(self, method, *args, **kwargs):
    """Create a worker for this client to be run in a separate thread.

    The worker is moved onto a fresh QThread; the queue/thread/worker
    lists keep references alive so Qt does not garbage-collect them.
    """
    thread = QThread()
    worker = ClientWorker(method, args, kwargs)
    worker.moveToThread(thread)
    # on finish: kick the queue scheduler, then stop the worker's thread
    worker.sig_finished.connect(self._start)
    worker.sig_finished.connect(thread.quit)
    thread.started.connect(worker.start)
    self._queue.append(thread)
    self._threads.append(thread)
    self._workers.append(worker)
    self._start()
    return worker
Create a worker for this client to be run in a separate thread.
def repoExitError(message):
    """Exit the repo manager with error status.

    The message is wrapped and prefixed with the program name before
    being passed to ``sys.exit``.
    """
    wrapper = textwrap.TextWrapper(
        break_on_hyphens=False,
        break_long_words=False,
    )
    text = "{}: error: {}".format(sys.argv[0], message)
    sys.exit(wrapper.fill(text))
Exits the repo manager with error status.
def get_instance(self, payload):
    """Build an instance of FactorInstance.

    :param dict payload: Payload response from the API
    :returns: twilio.rest.authy.v1.service.entity.factor.FactorInstance
    :rtype: twilio.rest.authy.v1.service.entity.factor.FactorInstance
    """
    return FactorInstance(
        self._version,
        payload,
        service_sid=self._solution['service_sid'],
        identity=self._solution['identity'],
    )
Build an instance of FactorInstance :param dict payload: Payload response from the API :returns: twilio.rest.authy.v1.service.entity.factor.FactorInstance :rtype: twilio.rest.authy.v1.service.entity.factor.FactorInstance
def start_pymol(quiet=False, options='-p', run=False):
    """Start up PyMOL and set general options.

    :param quiet: suppress all PyMOL feedback output.
    :param options: command line options to pass to PyMOL.
    :param run: also initialize PyMOL via ``initialize_pymol``.
    """
    import pymol
    # PyMOL reads its launch arguments from this module attribute
    pymol.pymol_argv = ['pymol', '%s' % options] + sys.argv[1:]
    if run:
        initialize_pymol(options)
    if quiet:
        pymol.cmd.feedback('disable', 'all', 'everything')
Starts up PyMOL and sets general options. Quiet mode suppresses all PyMOL output. Command line options can be passed as the second argument.
def to_even_columns(data, headers=None):
    """Nicely format the 2-dimensional list into evenly spaced columns.

    :param data: 2-d list of cell strings.
    :param headers: optional flat list of column header strings.
    :returns: the formatted table as a single string.
    """
    result = ''
    col_width = max(len(word) for row in data for word in row) + 2
    if headers:
        # Measure each header string. The original iterated the characters
        # of each header (``for row in headers for word in row``), so
        # header_width was always 1 + 2 and long headers overflowed.
        header_width = max(len(word) for word in headers) + 2
        if header_width > col_width:
            col_width = header_width
        result += "".join(word.ljust(col_width) for word in headers) + "\n"
        result += '-' * col_width * len(headers) + "\n"
    for row in data:
        result += "".join(word.ljust(col_width) for word in row) + "\n"
    return result
Nicely format the 2-dimensional list into evenly spaced columns
def has_bitshifts(self):
    """Detect whether any bit-shift operation occurs in the function.

    Scans every VEX statement of every block for shift-left, shift-right,
    or arithmetic shift-right binary operations.

    :return: ``{CodeTags.HAS_BITSHIFTS}`` when a shift is found, else None.
    """
    def _has_bitshifts(expr):
        # only binary VEX expressions can be shift operations
        if isinstance(expr, pyvex.IRExpr.Binop):
            return expr.op.startswith("Iop_Shl") or expr.op.startswith("Iop_Shr") \
                or expr.op.startswith("Iop_Sar")
        return False

    found_bitops = False
    for block in self._function.blocks:
        if block.size == 0:
            # skip empty blocks; lifting them would be pointless
            continue
        for stmt in block.vex.statements:
            # shifts show up in the data of register writes and temp writes
            if isinstance(stmt, pyvex.IRStmt.Put):
                found_bitops = found_bitops or _has_bitshifts(stmt.data)
            elif isinstance(stmt, pyvex.IRStmt.WrTmp):
                found_bitops = found_bitops or _has_bitshifts(stmt.data)
            if found_bitops:
                break
        if found_bitops:
            return { CodeTags.HAS_BITSHIFTS }
    return None
Detects if there is any bitwise operation in the function. :return: Tags.
def _get_raw_data(self, is_valid_key, data_key): result = None if self._read_imu(): data = self._imu.getIMUData() if data[is_valid_key]: raw = data[data_key] result = { 'x': raw[0], 'y': raw[1], 'z': raw[2] } return result
Internal. Returns the specified raw data from the IMU when valid
def add_string_as_file(self, content, filename, pred=None):
    """Add a string to the archive as a file named `filename`.

    Skipped (with a log message) when the predicate evaluates false.
    """
    summary = ''
    if content:
        summary = content.splitlines()[0]
    if not isinstance(summary, six.string_types):
        # bytes content: decode the summary line for logging purposes
        summary = content.decode('utf8', 'ignore')
    if not self.test_predicate(cmd=False, pred=pred):
        self._log_info("skipped string ...'%s' due to predicate (%s)" %
                       (summary, self.get_predicate(pred=pred)))
        return
    self.copy_strings.append((content, filename))
    self._log_debug("added string ...'%s' as '%s'" % (summary, filename))
Add a string to the archive as a file named `filename`