code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def write(self, s):
    """Write the byte string ``s`` to the stream, updating CRC and position."""
    if self.comptype == "gz":
        # keep a running CRC of the uncompressed payload
        self.crc = self.zlib.crc32(s, self.crc)
    self.pos += len(s)
    if self.comptype != "tar":
        s = self.cmp.compress(s)
    self.__write(s)
Write string s to the stream.
def get(self, href):
    """Fetch a single item by its href; return None when it does not exist."""
    if self.is_fake:
        return
    uid = _trim_suffix(href, ('.ics', '.ical', '.vcf'))
    etesync_item = self.collection.get(uid)
    if etesync_item is None:
        return None
    try:
        item = vobject.readOne(etesync_item.content)
    except Exception as e:
        raise RuntimeError("Failed to parse item %r in %r" % (href, self.path)) from e
    # serve "now" as the Last-Modified timestamp, in HTTP date format
    last_modified = time.strftime(
        "%a, %d %b %Y %H:%M:%S GMT", time.gmtime(time.time()))
    return EteSyncItem(self, item, href,
                       last_modified=last_modified,
                       etesync_item=etesync_item)
Fetch a single item.
def _cast(cls, base_info, take_ownership=True):
    """Cast a GIBaseInfo instance to its registered subtype.

    The original must not already own its data; ownership is taken here
    when `take_ownership` is True.
    """
    try:
        new_obj = cast(base_info, cls.__types[base_info.type.value])
    except KeyError:
        # no specialized wrapper registered for this type value
        new_obj = base_info
    if take_ownership:
        assert not base_info.__owns
        new_obj._take_ownership()
    return new_obj
Casts a GIBaseInfo instance to the right sub type. The original GIBaseInfo can't have ownership. Will take ownership.
def create_secret(self, value, contributor, metadata=None, expires=None):
    """Create a new secret and return its handle as a string.

    :param value: secret value to store
    :param contributor: user owning the secret
    :param metadata: optional JSON-serializable metadata dict
    :param expires: optional expiry date/time (None means never expires)
    :return: secret handle
    """
    secret = self.create(
        value=value,
        contributor=contributor,
        metadata={} if metadata is None else metadata,
        expires=expires,
    )
    return str(secret.handle)
Create a new secret, returning its handle. :param value: Secret value to store :param contributor: User owning the secret :param metadata: Optional metadata dictionary (must be JSON serializable) :param expires: Optional date/time of expiry (defaults to None, which means that the secret never expires) :return: Secret handle
def dump(self, fname):
    """Save the TZX data held in ``self.output`` to the file ``fname``."""
    with open(fname, 'wb') as out:
        out.write(self.output)
Saves TZX file to fname
def record(self, chunk_size=None, dfmt="f", channels=1,
           rate=DEFAULT_SAMPLE_RATE, **kwargs):
    """Record audio from the device into a Stream.

    Parameters
    ----------
    chunk_size :
        Samples per chunk (block sent to device); defaults to ``chunks.size``.
    dfmt :
        Sample format as in ``chunks()``; default "f" (Float32).
    channels :
        Number of channels in the (serialized) audio stream.
    rate :
        Sample rate (same input used in ``sHz``).

    Returns
    -------
    Endless Stream instance gathering data from the audio input device.
    """
    if chunk_size is None:
        chunk_size = chunks.size
    if hasattr(self, "api"):
        # pin the recording to this API's default input device
        kwargs.setdefault("input_device_index", self.api["defaultInputDevice"])
    channels = kwargs.pop("nchannels", channels)  # legacy alias
    stream = RecStream(
        self,
        self._pa.open(format=_STRUCT2PYAUDIO[dfmt],
                      channels=channels,
                      rate=rate,
                      frames_per_buffer=chunk_size,
                      input=True,
                      **kwargs),
        chunk_size,
        dfmt,
    )
    self._recordings.append(stream)
    return stream
Records audio from device into a Stream. Parameters ---------- chunk_size : Number of samples per chunk (block sent to device). dfmt : Format, as in chunks(). Default is "f" (Float32). channels : Channels in audio stream (serialized). rate : Sample rate (same input used in sHz). Returns ------- Endless Stream instance that gather data from the audio input device.
def merge_pres_feats(pres, features):
    """Merge `features` dicts into the matching `pres` dicts (in place).

    Helper to support the legacy `features` argument; returns the nested
    subject/experiment/list structure of merged dicts.
    """
    def _merge(p, f):
        p.update(f)
        return p
    return [
        [[_merge(p, f) for p, f in zip(pexp, fexp)]
         for pexp, fexp in zip(psub, fsub)]
        for psub, fsub in zip(pres, features)
    ]
Helper function to merge pres and features to support legacy features argument
def get_arg_value_as_type(self, key, default=None, convert_int=False):
    """Convert a truthy query argument (true/yes/false/no) to a bool.

    :param str key: name of the query argument to fetch and adapt
    :param default: value returned when the argument is absent
    :param convert_int: unused -- kept for interface compatibility
    :returns: adapted value
    :rtype: dynamic
    """
    val = self.get_query_argument(key, default)
    # previously a missing argument with default None crashed on None.lower()
    if isinstance(val, int) or val is None:
        return val
    lowered = val.lower()
    if lowered in ('true', 'yes'):
        return True
    if lowered in ('false', 'no'):
        return False
    return val
Allow users to pass through truthy values like true, yes, false, no and get a typed variable in your code :param str key: name of the query argument whose string representation you want to convert :param default: value returned when the argument is absent :returns: adapted value :rtype: dynamic
def do_printActivities(self,args): parser = CommandArgumentParser("printActivities") parser.add_argument('-r','--refresh',action='store_true',dest='refresh',help='refresh'); args = vars(parser.parse_args(args)) refresh = args['refresh'] or not self.activities if refresh: response = self.client.describe_scaling_activities(AutoScalingGroupName=self.scalingGroup) self.activities = response['Activities'] index = 0 for activity in self.activities: print "{}: {} -> {} {}: {}".format(index,activity['StartTime'],stdplus.defaultifyDict(activity,'EndTime',''),activity['StatusCode'],activity['Description']) index = index + 1
Print scaling activities
def _contextMenu(self, pos):
    """Handle the plot area's customContextMenuRequested signal.

    :param QPoint pos: mouse position relative to the plot area
    """
    menu = QMenu(self)
    menu.addAction(self._zoomBackAction)
    # translate the widget-relative position into global screen coordinates
    globalPosition = self.getWidgetHandle().mapToGlobal(pos)
    menu.exec_(globalPosition)
Handle plot area customContextMenuRequested signal. :param QPoint pos: Mouse position relative to plot area
def get(url, params=None):
    """Invoke an HTTP GET request on a URL.

    Args:
        url (string): URL endpoint to request
        params (dict): optional dictionary of URL parameters

    Returns:
        dict: JSON response as a dictionary

    Raises:
        MtgException: wrapping any HTTPError from the server
    """
    # the mutable default argument ({}) was shared across calls; use None
    request_url = url
    if params:
        request_url = "{}?{}".format(url, urlencode(params))
    try:
        req = Request(request_url, headers={'User-Agent': 'Mozilla/5.0'})
        return json.loads(urlopen(req).read().decode("utf-8"))
    except HTTPError as err:
        raise MtgException(err.read())
Invoke an HTTP GET request on a url Args: url (string): URL endpoint to request params (dict): Dictionary of url parameters Returns: dict: JSON response as a dictionary
def equipable_classes(self):
    """Return the list of classes that _can_ use the item.

    Falls back to the currently-equipped classes when the schema item
    carries no "used_by_classes" entry; empty entries are filtered out.
    """
    used_by = self._schema_item.get("used_by_classes", self.equipped.keys())
    return [klass for klass in used_by if klass]
Returns a list of classes that _can_ use the item.
def oauth2(self):
    """Return the oauth2 helper bound to this connection's /oauth2 URL."""
    url = self._url if self._url.endswith("/oauth2") else self._url + "/oauth2"
    return _oauth2.oauth2(oauth_url=url,
                          securityHandler=self._securityHandler,
                          proxy_url=self._proxy_url,
                          proxy_port=self._proxy_port)
returns the oauth2 class
def failure_count(self):
    """Number of failed test cases in this list.

    :return: integer
    """
    # sum over a generator instead of materializing a list; the original
    # enumerate() index was never used
    return sum(1 for result in self.data if result.failure)
Number of failed test cases in this list. :return: integer
def uri(self):
    """Connection string to pass to `~pymongo.mongo_client.MongoClient`."""
    if self._uds_path:
        host = quote_plus(self._uds_path)
    else:
        host = format_addr(self._address)
    base = 'mongodb://%s' % (host,)
    if self._ssl:
        return base + '/?ssl=true'
    return base
Connection string to pass to `~pymongo.mongo_client.MongoClient`.
def remote_run(cmd, instance_name, detach=False, retries=1):
    """Run a command on a GCS instance over SSH, optionally detached in screen.

    Retries up to `retries` extra times on a non-zero exit, re-raising the
    last CalledProcessError.
    """
    if detach:
        cmd = SCREEN.format(command=cmd)
    args = SSH.format(instance_name=instance_name).split()
    args.append(cmd)
    for attempt in range(retries + 1):
        try:
            if attempt > 0:
                tf.logging.info("Retry %d for %s", attempt, args)
            return sp.check_call(args)
        except sp.CalledProcessError as e:
            if attempt == retries:
                raise e
Run command on GCS instance, optionally detached.
def setCentralWidget(self, widget):
    """Set the central widget for this overlay to the given widget.

    :param widget | <QtGui.QWidget> or None
    """
    self._centralWidget = widget
    if widget is None:
        return
    widget.setParent(self)
    widget.installEventFilter(self)
    # give the central widget a soft black halo
    effect = QtGui.QGraphicsDropShadowEffect(self)
    effect.setColor(QtGui.QColor('black'))
    effect.setBlurRadius(80)
    effect.setOffset(0, 0)
    widget.setGraphicsEffect(effect)
Sets the central widget for this overlay to the inputed widget. :param widget | <QtGui.QWidget>
def from_json(cls, data, result=None):
    """Create a new Node element from JSON data.

    :param data: element data from JSON (dict)
    :param result: the overpy.Result this element belongs to
    :return: new instance of the node class
    :raises overpy.exception.ElementDataWrongType: if the JSON "type" value
        does not match this class.
    """
    if data.get("type") != cls._type_value:
        raise exception.ElementDataWrongType(
            type_expected=cls._type_value,
            type_provided=data.get("type")
        )
    ignore = ("type", "id", "lat", "lon", "tags")
    # everything that is not a known field becomes an extra attribute
    attributes = {n: v for n, v in data.items() if n not in ignore}
    return cls(node_id=data.get("id"),
               lat=data.get("lat"),
               lon=data.get("lon"),
               tags=data.get("tags", {}),
               attributes=attributes,
               result=result)
Create new Node element from JSON data :param data: Element data from JSON :type data: Dict :param result: The result this element belongs to :type result: overpy.Result :return: New instance of Node :rtype: overpy.Node :raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
def _select_concept(self, line):
    """Try to match a SKOS concept (by pattern or numeric index) and load it."""
    g = self.current['graph']
    using_pattern = bool(line)
    if not line:
        out = g.all_skos_concepts
    else:
        if line.isdigit():
            line = int(line)
        out = g.get_skos(line)
    if not out:
        print("not found")
        return
    if type(out) is list:
        choice = self._selectFromList(out, using_pattern, "concept")
        if choice:
            self.currentEntity = {'name': choice.locale or choice.uri,
                                  'object': choice,
                                  'type': 'concept'}
    else:
        self.currentEntity = {'name': out.locale or out.uri,
                              'object': out,
                              'type': 'concept'}
    # NOTE(review): this may show a previously selected entity when the
    # list choice above was cancelled -- behavior kept from the original
    if self.currentEntity:
        self._print_entity_intro(entity=self.currentEntity)
try to match a class and load it
def _es_data(settings):
    """Extract the data-formatting related subset of widget settings."""
    keys = (ConsoleWidget.SETTING_DATA_FORMATING, ConsoleWidget.SETTING_DATA_TYPE)
    return {key: settings[key] for key in keys}
Extract the data-formatting related subset of widget settings.
def get_filelikeobject(filename: str = None, blob: bytes = None) -> BinaryIO:
    """Open a binary file-like object from a filename or an in-memory blob.

    Guard the use of this function with ``with``.

    Args:
        filename: specify the source via a filename
        blob: specify the source via an in-memory ``bytes`` object

    Returns:
        a :class:`BinaryIO` object

    Raises:
        ValueError: if neither or both of ``filename``/``blob`` are given
    """
    if not filename and not blob:
        raise ValueError("no filename and no blob")
    if filename and blob:
        raise ValueError("specify either filename or blob")
    return open(filename, 'rb') if filename else io.BytesIO(blob)
Open a file-like object. Guard the use of this function with ``with``. Args: filename: for specifying via a filename blob: for specifying via an in-memory ``bytes`` object Returns: a :class:`BinaryIO` object
def op(name, data, display_name=None, description=None, collections=None):
    """Create a legacy text summary op for the TensorBoard Text Dashboard.

    Args:
        name: name for the generated node and the TensorBoard series.
        data: a string-type Tensor, UTF-8 encoded.
        display_name: optional display name; defaults to `name`.
        description: optional long-form Markdown description.
        collections: optional list of graph collection keys to add to.

    Returns:
        A TensorSummary op tagged so TensorBoard renders it as text.
    """
    import tensorflow.compat.v1 as tf
    summary_metadata = metadata.create_summary_metadata(
        display_name=name if display_name is None else display_name,
        description=description)
    with tf.name_scope(name):
        # fail fast if a non-string tensor is supplied
        with tf.control_dependencies([tf.assert_type(data, tf.string)]):
            return tf.summary.tensor_summary(name='text_summary',
                                             tensor=data,
                                             collections=collections,
                                             summary_metadata=summary_metadata)
Create a legacy text summary op. Text data summarized via this plugin will be visible in the Text Dashboard in TensorBoard. The standard TensorBoard Text Dashboard will render markdown in the strings, and will automatically organize 1D and 2D tensors into tables. If a tensor with more than 2 dimensions is provided, a 2D subarray will be displayed along with a warning message. (Note that this behavior is not intrinsic to the text summary API, but rather to the default TensorBoard text plugin.) Args: name: A name for the generated node. Will also serve as a series name in TensorBoard. data: A string-type Tensor to summarize. The text must be encoded in UTF-8. display_name: Optional name for this summary in TensorBoard, as a constant `str`. Defaults to `name`. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. collections: Optional list of ops.GraphKeys. The collections to which to add the summary. Defaults to [Graph Keys.SUMMARIES]. Returns: A TensorSummary op that is configured so that TensorBoard will recognize that it contains textual data. The TensorSummary is a scalar `Tensor` of type `string` which contains `Summary` protobufs. Raises: ValueError: If tensor has the wrong type.
def delete_vlan(self, nexus_host, vlanid):
    """Delete a VLAN on a Nexus switch given the VLAN ID."""
    starttime = time.time()
    self.client.rest_delete(snipp.PATH_VLAN % vlanid, nexus_host)
    # record how long the delete took
    self.capture_and_print_timeshot(starttime, "del_vlan", switch=nexus_host)
Delete a VLAN on Nexus Switch given the VLAN ID.
def heightmap_normalize(hm: np.ndarray, mi: float = 0.0, ma: float = 1.0) -> None:
    """Normalize heightmap values to lie between ``mi`` and ``ma`` (in place).

    Args:
        hm: the heightmap array to normalize
        mi (float): lowest value after normalization
        ma (float): highest value after normalization
    """
    lib.TCOD_heightmap_normalize(_heightmap_cdata(hm), mi, ma)
Normalize heightmap values between ``mi`` and ``ma``. Args: mi (float): The lowest value after normalization. ma (float): The highest value after normalization.
def close(self, value):
    """Notify the Cytomine server of the job's end.

    A ``value`` of None marks the job terminated; anything else marks it
    failed, recording ``str(value)`` (truncated to 255 chars) as the comment.
    """
    if value is None:
        self._job.status = Job.TERMINATED
        self._job.statusComment = "Job successfully terminated"
    else:
        self._job.status = Job.FAILED
        self._job.statusComment = str(value)[:255]
    self._job.update()
Notify the Cytomine server of the job's end; incurs a data flow to update the job's status.
def seg(reference_intervals, estimated_intervals):
    """Compute the MIREX 'MeanSeg' score.

    Returns the minimum of under- and over-segmentation scores, in [0, 1],
    where 1.0 means perfect segmentation.
    """
    under = underseg(reference_intervals, estimated_intervals)
    over = overseg(reference_intervals, estimated_intervals)
    return min(under, over)
Compute the MIREX 'MeanSeg' score. Examples -------- >>> (ref_intervals, ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab') >>> (est_intervals, ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab') >>> score = mir_eval.chord.seg(ref_intervals, est_intervals) Parameters ---------- reference_intervals : np.ndarray, shape=(n, 2), dtype=float Reference chord intervals to score against. estimated_intervals : np.ndarray, shape=(m, 2), dtype=float Estimated chord intervals to score against. Returns ------- segmentation score : float Comparison score, in [0.0, 1.0], where 1.0 means perfect segmentation.
def coarse_tag_str(pos_seq):
    """Convert a POS tag sequence to the coarse tag system, as a string.

    Tags missing from the ``tag2coarse`` mapping become 'O'.
    """
    # the original declared `global tag2coarse` but never assigned it;
    # reading a module global needs no declaration, so it is dropped
    return ''.join(tag2coarse.get(tag, 'O') for tag in pos_seq)
Convert POS sequence to our coarse system, formatted as a string.
def get_project_id():
    """Get the project ID from the Cloud SDK.

    Returns:
        Optional[str]: the project ID, or None when the SDK is unavailable,
        its output is not valid JSON, or the project key is missing.
    """
    command = (_CLOUD_SDK_WINDOWS_COMMAND if os.name == 'nt'
               else _CLOUD_SDK_POSIX_COMMAND)
    try:
        output = subprocess.check_output(
            (command,) + _CLOUD_SDK_CONFIG_COMMAND,
            stderr=subprocess.STDOUT)
    except (subprocess.CalledProcessError, OSError, IOError):
        return None
    try:
        configuration = json.loads(output.decode('utf-8'))
        return configuration['configuration']['properties']['core']['project']
    except (ValueError, KeyError):
        return None
Gets the project ID from the Cloud SDK. Returns: Optional[str]: The project ID.
def add_phrase(self, phrase: List[int]) -> None:
    """Recursively add a phrase (list of word IDs) to this trie node."""
    if len(phrase) == 1:
        # last word: mark it as a terminal of this node
        self.final_ids.add(phrase[0])
        return
    head, tail = phrase[0], phrase[1:]
    if head not in self.children:
        self.children[head] = AvoidTrie()
    self.step(head).add_phrase(tail)
Recursively adds a phrase to this trie node. :param phrase: A list of word IDs to add to this trie node.
def parse_regions(self, path):
    """Load a regions file into memory as a pandas DataFrame.

    :param path: file path
    :return: a pandas DataFrame
    """
    if self.schema_format.lower() == GTF.lower():
        return self._parse_gtf_regions(path)
    return self._parse_tab_regions(path)
Given a file path, it loads it into memory as a Pandas dataframe :param path: file path :return: a Pandas Dataframe
def body_block_paragraph_content(text):
    """Format a simple paragraph of text; return an empty dict for blank input.

    The original guard ``text and text != ''`` was redundant: any truthy
    value already satisfies ``text != ''``.
    """
    tag_content = OrderedDict()
    if text:
        tag_content["type"] = "paragraph"
        tag_content["text"] = clean_whitespace(text)
    return tag_content
for formatting of simple paragraphs of text only, and check if it is all whitespace
def bytes_from_string(value):
    """Interpret a human readable string (e.g. '10MB', '2G') as bytes.

    :returns: int number of bytes
    :raises ValueError: for non-string input or an uninterpretable string
    """
    BYTE_POWER = {
        'K': 1, 'KB': 1,
        'M': 2, 'MB': 2,
        'G': 3, 'GB': 3,
        'T': 4, 'TB': 4,
        'P': 5, 'PB': 5,
    }
    if isinstance(value, six.string_types):
        value = six.text_type(value)
    else:
        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
        raise ValueError(msg)
    matches = re.match(r"([0-9]+)([a-zA-Z]+)", value)
    if matches:
        # normalize the suffix; previously an unknown or lowercase suffix
        # (e.g. '10k', '10x') leaked a KeyError instead of ValueError
        suffix = matches.group(2).upper()
        if suffix not in BYTE_POWER:
            raise ValueError(
                "Unable to interpret string value '%s' as bytes" % (value))
        size = int(matches.group(1)) * (1024 ** BYTE_POWER[suffix])
    else:
        try:
            size = int(value)
        except ValueError:
            msg = "Unable to interpret string value '%s' as bytes" % (value)
            raise ValueError(msg)
    return size
Interpret human readable string value as bytes. Returns int
def group_by(keys, values=None, reduction=None, axis=0):
    """Construct a grouping object on `keys`, optionally splitting/reducing.

    Parameters
    ----------
    keys : indexable object to group by
    values : array_like, optional, same length as keys
    reduction : callable, optional, applied to the values in each group
    axis : int, optional, axis treated as the key sequence

    Returns
    -------
    A GroupBy object when values is None; (unique keys, grouped values) when
    no reduction is given; otherwise a list of (key, reduced value) tuples.
    """
    grouper = GroupBy(keys, axis)
    if values is None:
        return grouper
    split_values = grouper.split(values)
    if reduction is None:
        return grouper.unique, split_values
    return [(key, reduction(group))
            for key, group in zip(grouper.unique, split_values)]
construct a grouping object on the given keys, optionally performing the given reduction on the given values Parameters ---------- keys : indexable object keys to group by values : array_like, optional sequence of values, of the same length as keys if a reduction function is provided, the given values are reduced by key if no reduction is provided, the given values are grouped and split by key reduction : lambda, optional reduction function to apply to the values in each group axis : int, optional axis to regard as the key-sequence, in case keys is multi-dimensional Returns ------- iterable if values is None, a GroupBy object of the given keys object if reduction is None, an tuple of a sequence of unique keys and a sequence of grouped values else, a sequence of tuples of unique keys and reductions of values over that key-group See Also -------- numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
def discrete(self, vertices, scale=1.0):
    """Discretize the arc entity into a linear path in space.

    Parameters
    ------------
    vertices : (n, dimension) float points in space
    scale : float, overall scene size for numerical comparisons

    Returns
    -------------
    (m, dimension) float polyline approximating the arc
    """
    polyline = discretize_arc(vertices[self.points],
                              close=self.closed,
                              scale=scale)
    return self._orient(polyline)
Discretize the arc entity into line sections. Parameters ------------ vertices : (n, dimension) float Points in space scale : float Size of overall scene for numerical comparisons Returns ------------- discrete: (m, dimension) float, linear path in space
def push_build_status(id):
    """Get the status of a Brew push for the given build record id."""
    response = utils.checked_api_call(pnc_api.build_push, 'status',
                                      build_record_id=id)
    if not response:
        return None
    return utils.format_json(response)
Get status of Brew push.
def refresh(self):
    """Fetch the latest representation of the current model from the API."""
    client = self._get_client()
    # missing id components are rendered as empty path segments
    endpoint = self._endpoint.format(
        resource_id=self.resource_id or "",
        parent_id=self.parent_id or "",
        grandparent_id=self.grandparent_id or "")
    self._reset_model(client.get_resource(endpoint))
Get the latest representation of the current model.
def is_email_simple(value):
    """Return True if value superficially looks like an email address."""
    if '@' not in value:
        return False
    if value.startswith('@') or value.endswith('@'):
        return False
    try:
        local, domain = value.split('@')
    except ValueError:
        # more than one '@'
        return False
    return '.' in domain and not domain.startswith('.')
Return True if value looks like an email address.
def create(fs, channels, application):
    """Allocate and initialize an Opus encoder state.

    :raises OpusError: if the library reports a non-OK result code.
    """
    result_code = ctypes.c_int()
    result = _create(fs, channels, application, ctypes.byref(result_code))
    # `is not` compared object identity, not value; it only worked by
    # accident of CPython's small-int caching. Compare by value.
    if result_code.value != constants.OK:
        raise OpusError(result_code.value)
    return result
Allocates and initializes an encoder state.
def generate_yaml_file(filename, contents):
    """Create a YAML file at `filename` with the given contents."""
    with open(filename, 'w') as handle:
        handle.write(yaml.dump(contents, default_flow_style=False))
Creates a yaml file with the given content.
def _is_non_string_iterable(value): if isinstance(value, str): return False if hasattr(value, '__iter__'): return True if isinstance(value, collections.abc.Sequence): return True return False
Whether a value is iterable.
def PrependSOffsetTRelative(self, off):
    """Prepend an SOffsetT, relative to where it will be written."""
    self.Prep(N.SOffsetTFlags.bytewidth, 0)  # ensure alignment
    if not (off <= self.Offset()):
        raise OffsetArithmeticError("flatbuffers: Offset arithmetic error.")
    self.PlaceSOffsetT(self.Offset() - off + N.SOffsetTFlags.bytewidth)
PrependSOffsetTRelative prepends an SOffsetT, relative to where it will be written.
def login(self):
    """Prompt to log into the user's account, or log out of the current one."""
    if not self.reddit.is_oauth_session():
        self.oauth.authorize()
        return
    ch = self.term.show_notification('Log out? (y/n)')
    if ch in (ord('y'), ord('Y')):
        self.oauth.clear_oauth_data()
        self.term.show_notification('Logged out')
Prompt to log into the user's account, or log out of the current account.
def set_input_by_xpath(self, xpath, value):
    """Set the value of a form element located by xpath.

    :param xpath: xpath path
    :param value: value to set on the element
    """
    elem = self.select(xpath).node()
    if self._lxml_form is None:
        # remember the enclosing <form> of this element
        parent = elem.getparent()
        while parent.tag != 'form':
            parent = parent.getparent()
        self._lxml_form = parent
    return self.set_input(elem.get('name'), value)
Set the value of form element by xpath :param xpath: xpath path :param value: value which should be set to element
def convert_items(self, items):
    """Generator like `convert_iterable`, but for 2-tuple (key, value) iterators."""
    for key, value in items:
        yield key, self.convert(value, self)
Generator like `convert_iterable`, but for 2-tuple iterators.
def slice_to(self, s):
    """Return the current slice [bra:ket] if slice_check() passes, else ''.

    NOTE(review): the `s` parameter (a StringBuffer per the original docs)
    is never written to here -- confirm against the snowball runtime.
    """
    if not self.slice_check():
        return ''
    return self.current[self.bra:self.ket]
Copy the slice into the supplied StringBuffer @type s: string
def smart_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
    """Return a unicode object representing `s`.

    Bytestrings are decoded with `encoding`; with strings_only=True, (some)
    non-string-like objects are passed through unconverted.
    """
    return force_unicode(s, encoding, strings_only, errors)
Returns a unicode object representing 's'. Treats bytestrings using the 'encoding' codec. If strings_only is True, don't convert (some) non-string-like objects.
def verify_keys(self):
    """Verify the public/private key pair with the Mollom service.

    :returns: True on success
    :raises MollomAuthenticationError: when the keys are rejected
    """
    endpoint = Template("${rest_root}/site/${public_key}")
    url = endpoint.substitute(rest_root=self._rest_root,
                              public_key=self._public_key)
    payload = {
        "clientName": "mollom_python",
        "clientVersion": "1.0",
    }
    self._client.headers["Content-Type"] = "application/x-www-form-urlencoded"
    response = self._client.post(url, payload, timeout=self._timeout)
    if response.status_code != 200:
        raise MollomAuthenticationError
    return True
Verify that the public and private key combination is valid; raises MollomAuthenticationError otherwise
def is_exact(needle, haystack, start, end, matchnot):
    """Check exact occurrence of needle at haystack[start:end]; matchnot flips it.

    NOTE(review): `end < len(haystack)` rejects a match flush with the end of
    haystack -- confirm this exclusive bound is intended.
    """
    found = (start >= 0
             and end < len(haystack)
             and haystack[start:end] == needle)
    return found ^ matchnot
Check exact occurrence of needle in haystack
def read_xso(src, xsomap):
    """Read a single XSO from a binary file-like `src` holding an XML document.

    `xsomap` maps XSO subclasses to callables; they are registered on a fresh
    XSOParser, so the map decides which class parses the document root.
    """
    xso_parser = xso.XSOParser()
    for xso_class, callback in xsomap.items():
        xso_parser.add_class(xso_class, callback)
    driver = xso.SAXDriver(xso_parser)
    parser = xml.sax.make_parser()
    parser.setFeature(xml.sax.handler.feature_namespaces, True)
    # never fetch external general entities (XXE hardening)
    parser.setFeature(xml.sax.handler.feature_external_ges, False)
    parser.setContentHandler(driver)
    parser.parse(src)
Read a single XSO from a binary file-like input `src` containing an XML document. `xsomap` must be a mapping which maps :class:`~.XSO` subclasses to callables. These will be registered at a newly created :class:`.xso.XSOParser` instance which will be used to parse the document in `src`. The `xsomap` is thus used to determine the class parsing the root element of the XML document. This can be used to support multiple versions.
def _CreateMultipleValuesCondition(self, values, operator): values = ['"%s"' % value if isinstance(value, str) or isinstance(value, unicode) else str(value) for value in values] return '%s %s [%s]' % (self._field, operator, ', '.join(values))
Creates a condition with the provided list of values and operator.
def is_transaction_invalidated(transaction, state_change):
    """True if `transaction` is made invalid by `state_change`.

    Only one race is considered here: a ContractReceiveChannelSettled for the
    same token network and channel invalidates a pending
    ContractSendChannelUpdateTransfer (the settled channel no longer accepts
    the update).
    """
    if not isinstance(state_change, ContractReceiveChannelSettled):
        return False
    if not isinstance(transaction, ContractSendChannelUpdateTransfer):
        return False
    return (
        state_change.token_network_identifier == transaction.token_network_identifier
        and state_change.channel_identifier == transaction.channel_identifier
    )
True if the `transaction` is made invalid by `state_change`. Some transactions will fail due to race conditions. The races are: - Another transaction which has the same side effect is executed before. - Another transaction which *invalidates* the state of the smart contract required by the local transaction is executed before it. The first case is handled by the predicate `is_transaction_effect_satisfied`, where a transaction from a different source which does the same thing is considered. This predicate handles the second scenario. A transaction can **only** invalidate another iff both share a valid initial state but a different end state. Valid example: A close can invalidate a deposit, because both a close and a deposit can be executed from an opened state (same initial state), but a close transaction will transition the channel to a closed state which doesn't allow for deposits (different end state). Invalid example: A settle transaction cannot invalidate a deposit because a settle is only allowed for the closed state and deposits are only allowed for the open state. In such a case a deposit should never have been sent. The deposit transaction for an invalid state is a bug and not a transaction which was invalidated.
def _after_request(self, response):
    """Flask after_request hook: record the response status on the current span.

    See: http://flask.pocoo.org/docs/0.12/api/#flask.Flask.after_request
    """
    if utils.disable_tracing_url(flask.request.url, self.blacklist_paths):
        return response
    try:
        tracer = execution_context.get_opencensus_tracer()
        tracer.add_attribute_to_current_span(
            HTTP_STATUS_CODE, str(response.status_code))
    except Exception:
        log.error('Failed to trace request', exc_info=True)
    finally:
        # the finally-return guarantees the response is always passed
        # through, even when tracing itself raised
        return response
A function to be run after each request. See: http://flask.pocoo.org/docs/0.12/api/#flask.Flask.after_request
def get_catalog_admin_session(self, proxy):
    """Get the catalog administrative session for creating, updating and
    deleting catalogs.

    arg: proxy (osid.proxy.Proxy): a proxy
    return: (osid.cataloging.CatalogAdminSession)
    raise: Unimplemented -- ``supports_catalog_admin()`` is ``false``
    """
    if not self.supports_catalog_admin():
        raise errors.Unimplemented()
    return sessions.CatalogAdminSession(proxy=proxy, runtime=self._runtime)
Gets the catalog administrative session for creating, updating and deleting catalogs. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.cataloging.CatalogAdminSession) - a ``CatalogAdminSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_catalog_admin()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_catalog_admin()`` is ``true``.*
def add_modules(self, package):
    """Register |name| -> :mod:`~pkg.name` substitutions for the package's
    top-level modules, skipping private ones, without their members."""
    for entry in os.listdir(package.__path__[0]):
        if entry.startswith('_'):
            continue
        modname = entry.split('.')[0]
        self._short2long['|%s|' % modname] = (
            ':mod:`~%s.%s`' % (package.__package__, modname))
Add the modules of the given package without their members.
def cli(log_level):
    """Manage tmux sessions (CLI entry point).

    Verifies that a sufficiently recent tmux is installed before configuring
    logging; exits with a message otherwise.
    """
    try:
        has_minimum_version()
    except TmuxCommandNotFound:
        click.echo('tmux not found. tmuxp requires you install tmux first.')
        sys.exit()
    except exc.TmuxpException as e:
        click.echo(e, err=True)
        sys.exit()
    setup_logger(level=log_level.upper())
Manage tmux sessions. Pass the "--help" argument to any command to see detailed help. See detailed documentation and examples at: http://tmuxp.readthedocs.io/en/latest/
def registerCategory(category):
    """Register `category` in the debug system, assigning its level from _DEBUG.

    _DEBUG is a comma-separated list of `pattern:level` chunks; a bare chunk
    is treated as a level for the '*' pattern. Non-integer levels become 5.
    """
    global _DEBUG
    global _levels
    global _categories
    level = 0
    for chunk in _DEBUG.split(','):
        if not chunk:
            continue
        if ':' in chunk:
            spec, value = chunk.split(':')
        else:
            spec, value = '*', chunk
        # does the pattern match this category?
        if category not in fnmatch.filter((category,), spec):
            continue
        if not value:
            continue
        try:
            level = int(value)
        except ValueError:
            level = 5
    _categories[category] = level
Register a given category in the debug system. A level will be assigned to it based on previous calls to setDebug.
def schedule(self, task):
    """Schedule a new Task in the PoolManager: register it, then dispatch it."""
    self.task_manager.register(task)
    self.worker_manager.dispatch(task)
Schedules a new Task in the PoolManager.
def import_file(self, record, field, fname, fobj, event=None, return_format='json'):
    """Upload a file object into a particular record's file field.

    Parameters
    ----------
    record : str, record ID
    field : str, field name the file will go into
    fname : str, file name visible in the REDCap UI
    fobj : file object, as returned by `open`
    event : str, unique event for longitudinal projects
    return_format : ('json'), 'csv', 'xml' -- error message format

    Returns
    -------
    response from the server in `return_format`
    """
    self._check_file_field(field)
    payload = self.__basepl(content='file', format=return_format)
    del payload['format']
    payload.update({
        'returnFormat': return_format,
        'action': 'import',
        'field': field,
        'record': record,
    })
    if event:
        payload['event'] = event
    file_kwargs = {'files': {'file': (fname, fobj)}}
    return self._call_api(payload, 'imp_file', **file_kwargs)[0]
Import the contents of a file represented by fobj to a particular records field Parameters ---------- record : str record ID field : str field name where the file will go fname : str file name visible in REDCap UI fobj : file object file object as returned by `open` event : str for longitudinal projects, specify the unique event here return_format : ('json'), 'csv', 'xml' format of error message Returns ------- response : response from server as specified by ``return_format``
def get_package_versions(intfs, props):
    """Look up the endpoint package version for each interface's package.

    :param intfs: list of interface class names
    :param props: dict of endpoint package versions
    :return: list of (property key, version) tuples for packages with a version
    """
    result = []
    for intf in intfs:
        pkg_name = get_package_from_classname(intf)
        if not pkg_name:
            continue
        key = ENDPOINT_PACKAGE_VERSION_ + pkg_name
        version = props.get(key)
        if version:
            result.append((key, version))
    return result
Gets the package version of interfaces :param intfs: A list of interfaces :param props: A dictionary containing endpoint package versions :return: A list of tuples (package name, version)
def minimum_multivariate_ess(nmr_params, alpha=0.05, epsilon=0.05):
    r"""Calculate the minimum multivariate Effective Sample Size needed for
    the desired precision, per the inequality of Vats et al. (2016):

    .. math:: \widehat{ESS} \geq \frac{2^{2/p}\pi}{(p\Gamma(p/2))^{2/p}}
              \frac{\chi^{2}_{1-\alpha,p}}{\epsilon^{2}}

    Args:
        nmr_params (int): number of free parameters in the model
        alpha (float): confidence level (0.05 -> 95% confidence region)
        epsilon (float): desired relative precision of the ESS estimate

    Returns:
        int: the minimum multivariate ESS to aim for in MCMC sampling

    References:
        Vats D, Flegal J, Jones G (2016). Multivariate Output Analysis for
        Markov Chain Monte Carlo. arXiv:1512.07713v2 [math.ST]
    """
    # the original body began with a stray bare `r` (a detached raw-string
    # docstring prefix), which raised NameError at call time; the docstring
    # is re-attached here
    tmp = 2.0 / nmr_params
    log_min_ess = (tmp * np.log(2) + np.log(np.pi)
                   - tmp * (np.log(nmr_params) + gammaln(nmr_params / 2))
                   + np.log(chi2.ppf(1 - alpha, nmr_params))
                   - 2 * np.log(epsilon))
    return int(round(np.exp(log_min_ess)))
r"""Calculate the minimum multivariate Effective Sample Size you will need to obtain the desired precision. This implements the inequality from Vats et al. (2016): .. math:: \widehat{ESS} \geq \frac{2^{2/p}\pi}{(p\Gamma(p/2))^{2/p}} \frac{\chi^{2}_{1-\alpha,p}}{\epsilon^{2}} Where :math:`p` is the number of free parameters. Args: nmr_params (int): the number of free parameters in the model alpha (float): the level of confidence of the confidence region. For example, an alpha of 0.05 means that we want to be in a 95% confidence region. epsilon (float): the level of precision in our multivariate ESS estimate. An epsilon of 0.05 means that we expect that the Monte Carlo error is 5% of the uncertainty in the target distribution. Returns: float: the minimum multivariate Effective Sample Size that one should aim for in MCMC sample to obtain the desired confidence region with the desired precision. References: Vats D, Flegal J, Jones G (2016). Multivariate Output Analysis for Markov Chain Monte Carlo. arXiv:1512.07713v2 [math.ST]
async def download_media_by_id(self, media_id):
    """Find the media contained in the message with `media_id` and download it
    into usermedia/."""
    try:
        msg = self.found_media[int(media_id)]
    except (ValueError, KeyError):
        # non-numeric id, or no message recorded under that id
        print('Invalid media ID given or message not found!')
        return
    print('Downloading media to usermedia/...')
    os.makedirs('usermedia', exist_ok=True)
    output = await self.download_media(
        msg.media,
        file='usermedia/',
        progress_callback=self.download_progress_callback
    )
    print('Media downloaded to {}!'.format(output))
Given a message ID, finds the media this message contained and downloads it.
def get_default_compartment(model):
    """Return what the default compartment should be set to.

    Defaults to 'c'; when some compounds lack a compartment and 'c' is
    already used, a fresh 'c_<n>' name is generated to avoid collisions
    and a warning is logged.
    """
    default_compartment = 'c'
    seen = set()
    for reaction in model.reactions:
        equation = reaction.equation
        if equation is None:
            continue
        for compound, _ in equation.compounds:
            seen.add(compound.compartment)
    if None in seen and default_compartment in seen:
        suffix = 1
        while True:
            candidate = '{}_{}'.format(default_compartment, suffix)
            if candidate not in seen:
                default_compartment = candidate
                break
            suffix += 1
    if None in seen:
        logger.warning(
            'Compound(s) found without compartment, default'
            ' compartment is set to {}.'.format(default_compartment))
    return default_compartment
Return what the default compartment should be set to. If some compounds have no compartment, unique compartment name is returned to avoid collisions.
def get_assessment_offered_query_session(self):
    """Get the OsidSession for the assessment-offered query service.

    return: (osid.assessment.AssessmentOfferedQuerySession)
    raise: Unimplemented -- ``supports_assessment_offered_query()`` is false
    """
    if not self.supports_assessment_offered_query():
        raise errors.Unimplemented()
    return sessions.AssessmentOfferedQuerySession(runtime=self._runtime)
Gets the ``OsidSession`` associated with the assessment offered query service. return: (osid.assessment.AssessmentOfferedQuerySession) - an ``AssessmentOfferedQuerySession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_offered_query()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_offered_query()`` is ``true``.*
def _extract_key_value(obj):
    """Extract a descriptive (key, value) pair from a Value or ProcessStep.

    Scalars are unwrapped to their .value; a single-element list collapses
    to that element; an empty result becomes None.
    """
    key = None
    value = None
    if isinstance(obj, Value):
        key = _construct_new_key(obj.name, obj.units)
        raw = []
        if obj.scalars:
            raw = obj.scalars
        elif obj.vectors and len(obj.vectors) == 1:
            raw = obj.vectors[0]
        value = [v.value if isinstance(v, Scalar) else v for v in raw]
        if len(value) == 1:
            value = value[0]
        elif len(value) == 0:
            value = None
    if isinstance(obj, ProcessStep):
        key = "Processing"
        value = obj.name
    return key, value
Extract the value from the object and make a descriptive key
def total_errors(self):
    """The total number of errors in the entire tree, including children."""
    child_errors = sum(len(subtree) for _, subtree in iteritems(self._contents))
    return len(self.errors) + child_errors
The total number of errors in the entire tree, including children.
def socks_username(self, value):
    """Set the socks proxy username; forces the proxy type to MANUAL.

    :Args:
     - value: the socks proxy username value
    """
    self._verify_proxy_type_compatibility(ProxyType.MANUAL)
    self.proxyType = ProxyType.MANUAL
    self.socksUsername = value
Sets socks proxy username setting. :Args: - value: The socks proxy username value.
def coinbase_tx(cls, public_key_sec, coin_value, coinbase_bytes=b'', version=1, lock_time=0):
    """Create the special "first in block" transaction that includes the mining fees."""
    tx_in = cls.TxIn.coinbase_tx_in(script=coinbase_bytes)
    # pay-to-pubkey output script for the miner
    script_text = "%s OP_CHECKSIG" % b2h(public_key_sec)
    tx_out = cls.TxOut(coin_value, BitcoinScriptTools.compile(script_text))
    return cls(version, [tx_in], [tx_out], lock_time)
Create the special "first in block" transaction that includes the mining fees.
def average_sources(source_encoded: mx.sym.Symbol,
                    source_encoded_length: mx.sym.Symbol) -> mx.nd.NDArray:
    """Average encoded source vectors over time, taking lengths into account.

    :param source_encoded: (n, source_encoded_length, hidden_size) encodings.
    :param source_encoded_length: (n,) vector of sequence lengths.
    :return: (n, hidden_size) average vectors.
    """
    # zero out positions past each sequence's length before summing
    masked = mx.sym.SequenceMask(data=source_encoded,
                                 axis=1,
                                 sequence_length=source_encoded_length,
                                 use_sequence_length=True,
                                 value=0.)
    totals = mx.sym.sum(masked, axis=1, keepdims=False)
    lengths = mx.sym.reshape(source_encoded_length, shape=(-1, 1))
    return mx.sym.broadcast_div(totals, lengths)
Calculate the average of encoded sources taking into account their lengths. :param source_encoded: Encoder representation for n elements. Shape: (n, source_encoded_length, hidden_size). :param source_encoded_length: A vector of encoded sequence lengths. Shape: (n,). :return: Average vectors. Shape(n, hidden_size).
def path_exists_or_creatable_portable(pathname: str) -> bool:
    """OS-portable check for whether the path exists or is creatable.

    Guaranteed never to raise; any OSError is treated as "no".
    """
    try:
        if not is_pathname_valid(pathname):
            return False
        return os.path.exists(pathname) or is_path_sibling_creatable(pathname)
    except OSError:
        return False
OS-portable check for whether current path exists or is creatable. This function is guaranteed to _never_ raise exceptions. Returns ------ `True` if the passed pathname is a valid pathname on the current OS _and_ either currently exists or is hypothetically creatable in a cross-platform manner optimized for POSIX-unfriendly filesystems; `False` otherwise.
def pandas_series_to_biopython_seqrecord(
    series,
    id_col='uid',
    sequence_col='sequence',
    extra_data=None,
    alphabet=None
):
    """Convert a pandas Series to a one-element list of Biopython SeqRecords.

    Parameters
    ----------
    series : Series
        Pandas series to convert.
    id_col : str
        Key in the series to use as the sequence label.
    sequence_col : str
        Key in the series holding the sequence data.
    extra_data : list, optional
        Extra keys whose values are joined into the description line.
    alphabet : optional
        Alphabet passed through to ``Seq``.

    Returns
    -------
    seq_records : list of SeqRecord
    """
    seq = Seq(series[sequence_col], alphabet=alphabet)
    # Renamed from ``id`` to avoid shadowing the builtin.
    record_id = series[id_col]
    description = ""
    if extra_data is not None:
        # str() cast so non-string columns (e.g. numeric) don't break join().
        description = " ".join(str(series[key]) for key in extra_data)
    record = SeqRecord(
        seq=seq,
        id=record_id,
        description=description,
    )
    return [record]
Convert pandas series to biopython seqrecord for easy writing. Parameters ---------- series : Series Pandas series to convert id_col : str column in dataframe to use as sequence label sequence_col : str column in dataframe to use as sequence data extra_data : list extra columns to use in sequence description line Returns ------- seq_records : List of biopython seqrecords.
def register_preset(cls, name, preset):
    """Register a preset instance under *name* on this hub class.

    The class-level registry is created lazily on first registration.
    """
    registry = cls._presets
    if registry is None:
        registry = cls._presets = {}
    registry[name] = preset
Register a preset instance with the class of the hub it corresponds to. This allows individual plugin objects to automatically register themselves with a preset by using a classmethod of their own with only the name of the preset to register with.
def _update_all_devices(self): self.all_devices = [] self.all_devices.extend(self.keyboards) self.all_devices.extend(self.mice) self.all_devices.extend(self.gamepads) self.all_devices.extend(self.other_devices)
Update the all_devices list.
def deletescript(self, name):
    """Delete a script from the server.

    See MANAGESIEVE specifications, section 2.10.

    :param name: script's name
    :rtype: boolean
    """
    code, _ = self.__send_command(
        "DELETESCRIPT", [name.encode("utf-8")])
    return code == "OK"
Delete a script from the server See MANAGESIEVE specifications, section 2.10 :param name: script's name :rtype: boolean
def init(image, root=None):
    """Mount the named image via qemu-nbd and return the mounted roots.

    CLI Example:

    .. code-block:: bash

        salt '*' qemu_nbd.init /srv/image.qcow2
    """
    device = connect(image)
    return mount(device, root) if device else ''
Mount the named image via qemu-nbd and return the mounted roots CLI Example: .. code-block:: bash salt '*' qemu_nbd.init /srv/image.qcow2
def cudnnDestroy(handle):
    """Release cuDNN resources.

    Release hardware resources used by cuDNN.

    Parameters
    ----------
    handle : cudnnHandle
        cuDNN context (opaque pointer value).
    """
    # Wrap the integer handle as a void* for the C ABI call.
    status = _libcudnn.cudnnDestroy(ctypes.c_void_p(handle))
    # Raises if the library reports anything other than success.
    cudnnCheckStatus(status)
Release cuDNN resources. Release hardware resources used by cuDNN. Parameters ---------- handle : cudnnHandle cuDNN context.
def getFeatureSet(self, id_):
    """Return the FeatureSet with the specified id.

    :raises: FeatureSetNotFoundException if no such feature set exists.
    """
    if id_ in self._featureSetIdMap:
        return self._featureSetIdMap[id_]
    raise exceptions.FeatureSetNotFoundException(id_)
Returns the FeatureSet with the specified id, or raises a FeatureSetNotFoundException otherwise.
def packbools(bools, dtype='L'):
    r"""Yield integers concatenating bools in chunks of dtype bit-length.

    >>> list(packbools([False, True, False, True, False, True], 'B'))
    [42]
    """
    width = NBITS[dtype]
    bit_values = ATOMS[dtype]
    chunk_iters = [iter(bools)] * width
    for group in zip_longest(*chunk_iters, fillvalue=False):
        yield sum(v for v, flag in zip(bit_values, group) if flag)
Yield integers concatenating bools in chunks of dtype bit-length. >>> list(packbools([False, True, False, True, False, True], 'B')) [42]
def get_compare_version():
    """Get the version comparison info.

    Returns:
        tuple: (state, response) where state is -1 for lower, 0 for same,
        1 for higher than the latest release, and response is a message.
    """
    state, latest_version = compare_latest_version()
    if state < 0:
        return -1, "A new version of Modis is available (v{})".format(latest_version)
    if state == 0:
        return 0, "You are running the latest version of Modis (v{})".format(version)
    return 1, "You are running a preview version of Modis (v{} pre-release)".format(version)
Get the version comparison info. Returns: (tuple) state (int): -1 for lower version, 0 for same version, 1 for higher version than latest. response (str): The response string.
def looks_like_url(url):
    """Simplified check to see if the text appears to be a URL.

    Similar to `urlparse` but much more basic. The original performed the
    isinstance check twice; the duplicate has been removed.

    Returns:
        True if the url str appears to be valid. False otherwise.

    >>> url = looks_like_url("totalgood.org")
    >>> bool(url)
    True
    """
    # Must be a string, of sane length, matching the basic URL regex.
    return bool(isinstance(url, basestring)
                and len(url) < 1024
                and cre_url.match(url))
Simplified check to see if the text appears to be a URL. Similar to `urlparse` but much more basic. Returns: True if the url str appears to be valid. False otherwise. >>> url = looks_like_url("totalgood.org") >>> bool(url) True
def reftrack_object_data(rt, role):
    """Return item data for the given Reftrack and role.

    :param rt: the Reftrack that holds the data
    :param role: item data role
    :returns: ``str(rt)`` for DisplayRole, ``rt`` itself for
        REFTRACK_OBJECT_ROLE, and None (implicitly) for any other role
    :raises: None
    """
    if role == QtCore.Qt.DisplayRole:
        return str(rt)
    if role == REFTRACK_OBJECT_ROLE:
        return rt
Return the reftrack for REFTRACK_OBJECT_ROLE :param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data :type rt: :class:`jukeboxcore.reftrack.Reftrack` :param role: item data role :type role: QtCore.Qt.ItemDataRole :returns: data for the id :rtype: depending on the role :raises: None
def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False): from scipy import interpolate method = interpolate.BPoly.from_derivatives m = method(xi, yi.reshape(-1, 1), orders=order, extrapolate=extrapolate) return m(x)
Convenience function for interpolate.BPoly.from_derivatives. Construct a piecewise polynomial in the Bernstein basis, compatible with the specified values and derivatives at breakpoints. Parameters ---------- xi : array_like sorted 1D array of x-coordinates yi : array_like or list of array-likes yi[i][j] is the j-th derivative known at xi[i] order: None or int or array_like of ints. Default: None. Specifies the degree of local polynomials. If not None, some derivatives are ignored. der : int or list How many derivatives to extract; None for all potentially nonzero derivatives (that is a number equal to the number of points), or a list of derivatives to extract. This number includes the function value as 0th derivative. extrapolate : bool, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. Default: False. See Also -------- scipy.interpolate.BPoly.from_derivatives Returns ------- y : scalar or array_like The result, of length R or length M or M by R.
def post(self, path, data, **kwargs):
    """Perform an HTTP POST of *data* to *path* on Device Cloud.

    Extra keyword arguments are forwarded to the underlying request call.

    :param str path: Device Cloud path to POST
    :param data: body of the POST request
    :returns: the response object from the underlying request
    """
    return self._make_request("POST", self._make_url(path), data=data, **kwargs)
Perform an HTTP POST request of the specified path in Device Cloud Make an HTTP POST request against Device Cloud with this accounts credentials and base url. This method uses the `requests <http://docs.python-requests.org/en/latest/>`_ library `request method <http://docs.python-requests.org/en/latest/api/#requests.request>`_ and all keyword arguments will be passed on to that method. :param str path: Device Cloud path to POST :param int retries: The number of times the request should be retried if an unsuccessful response is received. Most likely, you should leave this at 0. :param data: The data to be posted in the body of the POST request (see docs for ``requests.post`` :raises DeviceCloudHttpException: if a non-success response to the request is received from Device Cloud :returns: A requests ``Response`` object
def get_addon_id(addonxml):
    """Parse and return the ``id`` attribute of the ``<addon>`` element
    in the given addon.xml filename."""
    document = parse(addonxml)
    root = document.getElementsByTagName('addon')[0]
    return root.getAttribute('id')
Parses an addon id from the given addon.xml filename.
def _check_local_handlers(cls, signals, handlers, namespace, configs): for aname, sig_name in handlers.items(): if sig_name not in signals: disable_check = configs[aname].get('disable_check', False) if not disable_check: raise SignalError("Cannot find a signal named '%s'" % sig_name)
For every marked handler, see if there is a suitable signal. If not, raise an error.
def _from_dict(cls, _dict):
    """Initialize a QueryEntitiesResponse object from a json dictionary."""
    args = {}
    if 'entities' in _dict:
        args['entities'] = [
            QueryEntitiesResponseItem._from_dict(item)
            for item in _dict.get('entities')
        ]
    return cls(**args)
Initialize a QueryEntitiesResponse object from a json dictionary.
def _generate_grid(self): grid_axes = [] for _, param in self.tunables: grid_axes.append(param.get_grid_axis(self.grid_width)) return grid_axes
Get the all possible values for each of the tunables.
def matches(self, verb, params):
    """Test if the view matches the HTTP verb and route parameters.

    :param verb: HTTP verb, uppercase
    :param params: existing route parameters (a set)
    :rtype: bool
    """
    if self.ifset is not None and not (self.ifset <= params):
        return False
    if self.ifnset is not None and not self.ifnset.isdisjoint(params):
        return False
    return self.methods is None or verb in self.methods
Test if the method matches the provided set of arguments :param verb: HTTP verb. Uppercase :type verb: str :param params: Existing route parameters :type params: set :returns: Whether this view matches :rtype: bool
def attach_socket(self, container, params=None, ws=False):
    """Like ``attach``, but returns the underlying socket-like object for
    the HTTP request.

    Args:
        container (str): The container to attach to.
        params (dict): Request parameters (e.g. ``stdout``, ``stderr``,
            ``stream``); defaults to streaming stdout and stderr.
        ws (bool): Use websockets instead of raw HTTP.
    """
    if params is None:
        # Default: stream both stdout and stderr.
        params = {
            'stdout': 1,
            'stderr': 1,
            'stream': 1
        }
    # Fall back to the detach keys from the general config (per the
    # docstring, sourced from ~/.docker/config.json) if none were given.
    if 'detachKeys' not in params \
            and 'detachKeys' in self._general_configs:
        params['detachKeys'] = self._general_configs['detachKeys']
    if ws:
        return self._attach_websocket(container, params)
    # Ask the daemon to upgrade the HTTP connection to a raw TCP stream.
    headers = {
        'Connection': 'Upgrade',
        'Upgrade': 'tcp'
    }
    u = self._url("/containers/{0}/attach", container)
    return self._get_raw_response_socket(
        self.post(
            u, None, params=self._attach_params(params), stream=True,
            headers=headers
        )
    )
Like ``attach``, but returns the underlying socket-like object for the HTTP request. Args: container (str): The container to attach to. params (dict): Dictionary of request parameters (e.g. ``stdout``, ``stderr``, ``stream``). For ``detachKeys``, ~/.docker/config.json is used by default. ws (bool): Use websockets instead of raw HTTP. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
def push(self, obj):
    """Prepend an element to the beginning of the list.

    Parameters
    ----------
    obj : KQMLObject or str
        A plain string is coerced to a KQMLToken before insertion.
    """
    item = KQMLToken(obj) if isinstance(obj, str) else obj
    self.data.insert(0, item)
Prepend an element to the beginning of the list. Parameters ---------- obj : KQMLObject or str If a string is passed, it is instantiated as a KQMLToken before being added to the list.
def ceph_is_installed(module):
    """Connection callback ensuring Ceph is installed on the remote host.

    Raises RuntimeError (naming the host) when Ceph is absent.
    """
    if not Ceph(module.conn).installed:
        raise RuntimeError(
            'ceph needs to be installed in remote host: %s' % module.conn.hostname
        )
A helper callback to be executed after the connection is made to ensure that Ceph is installed.
def set_widgets(self):
    """Set widgets on the Aggregation Layer from Canvas tab."""
    # Remember the previously selected layer id (if any) so the selection
    # can be restored after the list is repopulated.
    last_layer = self.parent.layer and self.parent.layer.id() or None
    self.lblDescribeCanvasAggLayer.clear()
    self.list_compatible_canvas_layers()
    self.auto_select_one_item(self.lstCanvasAggLayers)
    if last_layer:
        # Collect the layer id stored behind each list row.
        layers = []
        for indx in range(self.lstCanvasAggLayers.count()):
            item = self.lstCanvasAggLayers.item(indx)
            layers += [item.data(QtCore.Qt.UserRole)]
        if last_layer in layers:
            # Re-select the row of the previously chosen layer.
            self.lstCanvasAggLayers.setCurrentRow(layers.index(last_layer))
    # Clear the icon placeholder.
    self.lblIconIFCWAggregationFromCanvas.setPixmap(QPixmap(None))
Set widgets on the Aggregation Layer from Canvas tab.
def _fw_pointers_convert_append_to_write(previous_version):
    """Decide whether to convert an append to a full write to avoid
    data integrity errors: true when forward pointers were enabled for
    the previous version but are no longer enabled now.
    """
    previous_cfg = get_fwptr_config(previous_version)
    was_enabled = previous_cfg is FwPointersCfg.ENABLED
    still_enabled = ARCTIC_FORWARD_POINTERS_CFG is FwPointersCfg.ENABLED
    return was_enabled and not still_enabled
This method decides whether to convert an append to a full write in order to avoid data integrity errors
def get_range_string(self):
    """Return the junction as '<left.chr>:<left.end>/<right.chr>:<right.start>'."""
    return "{}:{}/{}:{}".format(
        self.left.chr, self.left.end, self.right.chr, self.right.start)
Another string representation of the junction. these may be redundant.
def makeSer(segID, N, CA, C, O, geo):
    """Creates a Serine residue from backbone atoms N/CA/C/O and geometry *geo*."""
    # Place CB from the backbone geometry.
    cb_coord = calculateCoordinates(
        N, C, CA, geo.CA_CB_length, geo.C_CA_CB_angle, geo.N_C_CA_CB_diangle)
    CB = Atom("CB", cb_coord, 0.0, 1.0, " ", " CB", 0, "C")
    # Place the side-chain hydroxyl oxygen relative to CB.
    og_coord = calculateCoordinates(
        N, CA, CB, geo.CB_OG_length, geo.CA_CB_OG_angle, geo.N_CA_CB_OG_diangle)
    OG = Atom("OG", og_coord, 0.0, 1.0, " ", " OG", 0, "O")
    res = Residue((' ', segID, ' '), "SER", ' ')
    for atom in (N, CA, C, O, CB, OG):
        res.add(atom)
    return res
Creates a Serine residue
def updateMedShockProcess(self):
    """Constructs discrete distributions of medical preference shocks for
    each period in the cycle. Distributions are saved as attribute
    MedShkDstn, which is added to time_vary.

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    MedShkDstn = []  # one discrete distribution per period of the cycle
    for t in range(self.T_cycle):
        MedShkAvgNow = self.MedShkAvg[t]  # target mean of shocks in period t
        MedShkStdNow = self.MedShkStd[t]  # stdev of log shocks in period t
        # mu = log(avg) - 0.5*sigma^2 so the lognormal's mean equals MedShkAvgNow.
        MedShkDstnNow = approxLognormal(mu=np.log(MedShkAvgNow)-0.5*MedShkStdNow**2,\
                        sigma=MedShkStdNow,N=self.MedShkCount, tail_N=self.MedShkCountTail, tail_bound=[0,0.9])
        # Add a zero-probability-mass point at shock = 0.0, keeping the mean.
        MedShkDstnNow = addDiscreteOutcomeConstantMean(MedShkDstnNow,0.0,0.0,sort=True)
        MedShkDstn.append(MedShkDstnNow)
    self.MedShkDstn = MedShkDstn
    self.addToTimeVary('MedShkDstn')
Constructs discrete distributions of medical preference shocks for each period in the cycle. Distributions are saved as attribute MedShkDstn, which is added to time_vary. Parameters ---------- None Returns ------- None
def delete(self, cls, rid, user='undefined'):
    """Delete a record by id.

    `user` is currently unused; it would be used with soft deletes.

    Raises KeyError when no record of type *cls* with id *rid* exists.
    """
    self.validate_record_type(cls)
    removed = self.db.delete(cls, {ID: rid})
    if removed < 1:
        raise KeyError('No record {}/{}'.format(cls, rid))
Delete a record by id. `user` currently unused. Would be used with soft deletes. >>> s = teststore() >>> s.create('tstoretest', {'id': '1', 'name': 'Toto'}) >>> len(s.list('tstoretest')) 1 >>> s.delete('tstoretest', '1') >>> len(s.list('tstoretest')) 0 >>> s.delete('tstoretest', '1') Traceback (most recent call last): ... KeyError: 'No record tstoretest/1'
def get_binding(self, schema, data):
    """For a given schema, return a Binding mediator providing links to
    the RDF terms matching that schema."""
    resolved = self.parent.get_schema(schema)
    return Binding(resolved, self.parent.resolver, data=data)
For a given schema, get a binding mediator providing links to the RDF terms matching that schema.
def _add_remove_user_template(self, url, template_id, account_id=None, email_address=None): if not email_address and not account_id: raise HSException("No email address or account_id specified") data = {} if account_id is not None: data = { "account_id": account_id } else: data = { "email_address": email_address } request = self._get_request() response = request.post(url + template_id, data) return response
Add or Remove user from a Template We use this function for two tasks because they have the same API call Args: template_id (str): The id of the template account_id (str): ID of the account to add/remove access to/from email_address (str): The email_address of the account to add/remove access to/from Raises: HSException: If no email address or account_id specified Returns: A Template object
def submissionfile_post_save(sender, instance, signal, created, **kwargs):
    """post_save hook: fill in the MD5 field for newly uploaded files."""
    if not created:
        return
    logger.debug("Running post-processing for new submission file.")
    instance.md5 = instance.attachment_md5()
    instance.save()
Update MD5 field for newly uploaded files.
def window_size(self, value):
    """Set private ``_window_size`` and rebuild the block matcher.

    *value* must be an odd number strictly between 4 and the configured
    maximum; otherwise InvalidWindowSizeError is raised.
    """
    maximum = self.parameter_maxima["window_size"]
    if value > 4 and value < maximum and value % 2:
        self._window_size = value
    else:
        # Original message claimed "between 0 and max+1", which does not
        # match the actual constraint checked above; corrected here.
        raise InvalidWindowSizeError("Window size must be an odd number "
                                     "greater than 4 and less than "
                                     "{}.".format(maximum))
    self._replace_bm()
Set private ``_window_size`` and reset ``_block_matcher``.