code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def create_sqs_policy(self, topic_name, topic_arn, topic_subs):
    """Add an SQS queue policy resource for an SNS subscription.

    Each subscription endpoint is an SQS queue ARN; the policy document is
    built from those ARNs, while the ``Queues`` property needs queue URLs,
    so each ARN is also converted to its HTTPS URL form.
    """
    arns = []
    urls = []
    for subscription in topic_subs:
        endpoint = subscription["Endpoint"]
        arns.append(endpoint)
        # arn:aws:sqs:region:account:name -> https://sqs.region.amazonaws.com/account/name
        parts = endpoint.split(":")
        urls.append("https://%s.%s.amazonaws.com/%s/%s" % (
            parts[2], parts[3], parts[4], parts[5]))
    self.template.add_resource(
        sqs.QueuePolicy(
            topic_name + "SubPolicy",
            PolicyDocument=queue_policy(topic_arn, arns),
            Queues=urls,
        )
    )
This method creates the SQS policy needed for an SNS subscription. It also takes the ARN of the SQS queue and converts it to the URL needed for the subscription, as that takes a URL rather than the ARN.
def get_object(self, request):
    """Return the 'pattern' GET parameter; a value shorter than three
    characters is treated as missing and raises ObjectDoesNotExist."""
    value = request.GET.get('pattern', '')
    if len(value) < 3:
        raise ObjectDoesNotExist
    return value
The GET parameter 'pattern' is the object.
def body(self):
    """Tail the extended FTP log and forward each parsed import request to
    the output AMQP exchange, tagged with a fresh UUID."""
    self.connection = pika.BlockingConnection(self.connection_param)
    self.channel = self.connection.channel()
    # Parenthesized print produces the same output under Python 2.
    print("Monitoring file '%s'." % self.ftp_extended_log)
    # sh.tail -f blocks forever; process_log yields import requests.
    events = process_log(
        sh.tail("-f", self.ftp_extended_log, _iter=True)
    )
    for request in events:
        self.sendMessage(
            exchange=self.output_exchange,
            routing_key=self.output_key,
            message=serializers.serialize(request),
            UUID=str(uuid.uuid1()),
        )
This method handles AMQP connection details and reacts to FTP events by sending messages to output queue.
def get_function_name(s):
    """Extract the function name from a C-style function declaration.

    Strips an optional leading ``__attribute__((...))`` clause, then takes
    the token immediately before the first ``(``.

    :param str s: A C-style function declaration string.
    :return: The function name (any pointer stars included, e.g. ``*foo``).
    :rtype: str
    :raises ValueError: if the declaration cannot be parsed.
    """
    s = s.strip()
    if s.startswith("__attribute__"):
        # Drop everything through the closing '))' of the attribute clause.
        if "))" not in s:
            raise ValueError("__attribute__ is present, but I cannot find double-right parenthesis in the function "
                             "declaration string.")
        s = s[s.index("))") + 2:].strip()
    if '(' not in s:
        raise ValueError("Cannot find any left parenthesis in the function declaration string.")
    # Everything before the parameter list: return type plus name.
    prefix = s[:s.index('(')].strip()
    # The name is whatever follows the last space.
    cut = prefix.rfind(' ')
    if cut == -1:
        raise ValueError('Cannot find any space in the function declaration string.')
    return prefix[cut + 1:]
Get the function name from a C-style function declaration string. :param str s: A C-style function declaration string. :return: The function name. :rtype: str
def get_settings(config_uri, section=None, defaults=None):
    """Load the settings from a named section of *config_uri*.

    :param config_uri: anything :func:`plaster.parse_uri` accepts.
    :param section: section name; ``None`` lets the loader pick a default
        (usually the ``#name`` fragment of the URI).
    :param defaults: dict of defaults available for interpolation; loader
        values take precedence.
    :returns: a ``dict`` of settings (possibly empty).
    """
    return get_loader(config_uri).get_settings(section, defaults)
Load the settings from a named section. .. code-block:: python settings = plaster.get_settings(...) print(settings['foo']) :param config_uri: Anything that can be parsed by :func:`plaster.parse_uri`. :param section: The name of the section in the config file. If this is ``None`` then it is up to the loader to determine a sensible default usually derived from the fragment in the ``path#name`` syntax of the ``config_uri``. :param defaults: A ``dict`` of default values used to populate the settings and support variable interpolation. Any values in ``defaults`` may be overridden by the loader prior to returning the final configuration dictionary. :returns: A ``dict`` of settings. This should return a dictionary object even if no data is available.
def _ensure_ifaces_tuple(ifaces):
    """Normalize *ifaces* to a tuple of interfaces.

    A single interface is wrapped in a one-tuple; any member that is not an
    ``ibc.Iface`` subclass raises TypeError.
    """
    try:
        candidates = tuple(ifaces)
    except TypeError:
        # Not iterable: treat it as a single interface.
        candidates = (ifaces,)
    for candidate in candidates:
        if not _issubclass(candidate, ibc.Iface):
            raise TypeError('Can only compare against interfaces.')
    return candidates
Convert to a tuple of interfaces and raise if not interfaces.
def return_dict(self):
    """Serialize this container into the input dictionary for ``make_plot.py``.

    'general' and 'figure' settings are flattened via
    ``_iterate_through_class``; per-axis settings go under 'plot_info',
    keyed by the axis index as a string.  The dict is printed first when
    ``self.print_input`` is truthy.
    """
    result = {
        'general': self._iterate_through_class(self.general.__dict__),
        'figure': self._iterate_through_class(self.figure.__dict__),
    }
    if self.total_plots > 1:
        result['plot_info'] = {
            str(index): self._iterate_through_class(axis.__dict__)
            for index, axis in enumerate(self.ax)
        }
    else:
        result['plot_info'] = {
            '0': self._iterate_through_class(self.ax.__dict__)
        }
    if self.print_input:
        print(result)
    return result
Output dictionary for ``make_plot.py`` input. Iterates through the entire MainContainer class turning its contents into dictionary form. This dictionary becomes the input for ``make_plot.py``. If `print_input` attribute is True, the entire dictionary will be printed prior to returning the dictionary. Returns: - **output_dict** (*dict*): Dictionary for input into ``make_plot.py``.
def write(self):
    """Flush ``self.content`` into ``self.base`` with prefixes applied.

    The first line gets ``first_prefix``, subsequent lines get ``prefix``;
    all-whitespace lines are emitted as bare newlines to avoid trailing
    whitespace.  Empty content appends nothing.
    """
    lines = ''.join(self.content).splitlines(True)
    if not lines:
        return
    pieces = [self.first_prefix + lines[0]]
    for line in lines[1:]:
        pieces.append('\n' if line.strip() == '' else self.prefix + line)
    self.base.append(''.join(pieces))
Add ``self.contents`` with current ``prefix`` and ``first_prefix`` Add processed ``self.contents`` to ``self.base``. The first line has ``first_prefix`` prepended, further lines have ``prefix`` prepended. Empty (all whitespace) lines get written as bare carriage returns, to avoid ugly extra whitespace.
def emptyTag(self, namespace, name, attrs, hasChildren=False):
    """Generate an EmptyTag token.

    :arg namespace: namespace of the token; may be ``None``
    :arg name: element name
    :arg attrs: element attributes as a dict
    :arg hasChildren: when True, additionally yield a serialization error
        because a void element must not have children
    :returns: EmptyTag token (generator)
    """
    token = {"type": "EmptyTag", "name": name,
             "namespace": namespace, "data": attrs}
    yield token
    if hasChildren:
        yield self.error("Void element has children")
Generates an EmptyTag token :arg namespace: the namespace of the token--can be ``None`` :arg name: the name of the element :arg attrs: the attributes of the element as a dict :arg hasChildren: whether or not to yield a SerializationError because this tag shouldn't have children :returns: EmptyTag token
def save_profile(self, **params):
    """Save the given keyword parameters into the image's profile settings.

    Returns the profile (creating and attaching one if absent), or None
    when there is no current image.
    """
    image = self.get_image()
    if image is None:
        return
    profile = image.get('profile', None)
    if profile is None:
        # Lazily create and attach a settings group to the image.
        profile = Settings.SettingGroup()
        image.set(profile=profile)
    self.logger.debug("saving to image profile: params=%s" % (
        str(params)))
    profile.set(**params)
    return profile
Save the given parameters into profile settings. Parameters ---------- params : dict Keywords and values to be saved.
def create_standalone_context(require=None, **settings) -> 'Context':
    """Create a standalone (headless) ModernGL context.

    The MODERNGL_BACKEND environment variable, when set, overrides the
    ``backend`` setting.

    Keyword Arguments:
        require (int): minimum OpenGL version code (e.g. 430).

    Returns:
        :py:class:`Context` object

    Raises:
        ValueError: when the created context is older than ``require``.
    """
    env_backend = os.environ.get('MODERNGL_BACKEND')
    if env_backend is not None:
        settings['backend'] = env_backend
    context = Context.__new__(Context)
    context.mglo, context.version_code = mgl.create_standalone_context(settings)
    context._screen = None
    context.fbo = None
    context._info = None
    context.extra = None
    if require is not None and context.version_code < require:
        raise ValueError('Requested OpenGL version {}, got version {}'.format(
            require, context.version_code))
    return context
Create a standalone ModernGL context. Example:: # Create a context with highest possible supported version ctx = moderngl.create_context() # Require at least OpenGL 4.3 ctx = moderngl.create_context(require=430) Keyword Arguments: require (int): OpenGL version code. Returns: :py:class:`Context` object
def allow_relation(self, obj1, obj2, **hints):
    """Database-router hook: allow the relation only when neither object
    belongs to the legacy 'oldimporter' app; otherwise return None so other
    routers may decide."""
    involves_legacy = (obj1._meta.app_label == 'oldimporter'
                       or obj2._meta.app_label == 'oldimporter')
    return None if involves_legacy else True
Relations between objects are allowed between nodeshot2 objects only
def delete(self):
    """Delete this resource on the server.

    :raises SbgError: when the resource type exposes no delete endpoint.
    """
    if 'delete' not in self._URL:
        raise SbgError('Resource can not be deleted!')
    extra = {'resource': self.__class__.__name__, 'query': {'id': self.id}}
    logger.info("Deleting {} resource.".format(self), extra=extra)
    self._api.delete(url=self._URL['delete'].format(id=self.id))
Deletes the resource on the server.
def to_str(self):
    """Return the string representation of the social account, prefixed
    with the user's name from the provider's extra data."""
    base = super(DataportenAccount, self).to_str()
    display_name = self.account.extra_data.get('name', '')
    return '%s (%s)' % (display_name, base)
Returns string representation of a social account. Includes the name of the user.
def update(backend=None):
    """Update the fileserver cache (all configured backends when *backend*
    is None, otherwise the named subset).  Always returns True."""
    fs = salt.fileserver.Fileserver(__opts__)
    fs.update(back=backend)
    return True
Update the fileserver cache. If no backend is provided, then the cache for all configured backends will be updated. backend Narrow fileserver backends to a subset of the enabled ones. .. versionchanged:: 2015.5.0 If all passed backends start with a minus sign (``-``), then these backends will be excluded from the enabled backends. However, if there is a mix of backends with and without a minus sign (ex: ``backend=-roots,git``) then the ones starting with a minus sign will be disregarded. Additionally, fileserver backends can now be passed as a comma-separated list. In earlier versions, they needed to be passed as a python list (ex: ``backend="['roots', 'git']"``) CLI Example: .. code-block:: bash salt-run fileserver.update salt-run fileserver.update backend=roots,git
def GE(classical_reg1, classical_reg2, classical_reg3):
    """Produce a GE (greater-or-equal) instruction.

    :param classical_reg1: Memory address receiving the comparison result.
    :param classical_reg2: Left comparison operand.
    :param classical_reg3: Right comparison operand.
    :return: A ClassicalGreaterEqual instance.
    """
    operands = prepare_ternary_operands(classical_reg1,
                                        classical_reg2,
                                        classical_reg3)
    return ClassicalGreaterEqual(*operands)
Produce a GE instruction. :param classical_reg1: Memory address to which to store the comparison result. :param classical_reg2: Left comparison operand. :param classical_reg3: Right comparison operand. :return: A ClassicalGreaterEqual instance.
def get(path_or_file, default=SENTINAL, mime=None, name=None, backend=None,
        encoding=None, encoding_errors=None, kwargs=None, _wtitle=False):
    """Return a document's full text (and its title when ``_wtitle``).

    Accepts a path or file-like object.  When *default* is given, any
    extraction error is logged and *default* is returned instead of
    raising.  *mime*/*name* help backend selection (mime wins); *encoding*
    and *encoding_errors* are honored mostly by pure-python backends;
    *kwargs* are forwarded to the backend.
    """
    try:
        text, title = _get(
            path_or_file, default=default, mime=mime, name=name,
            backend=backend, kwargs=kwargs, encoding=encoding,
            encoding_errors=encoding_errors, _wtitle=_wtitle)
    except Exception as error:
        # Best-effort mode: swallow and log only when a default was supplied.
        if default is SENTINAL:
            raise
        LOGGER.exception(error)
        return default
    return (text, title) if _wtitle else text
Get document full text. Accepts a path or file-like object. * If given, `default` is returned instead of an error. * `backend` is either a module object or a string specifying which default backend to use (e.g. "doc"); take a look at backends directory to see a list of default backends. * `mime` and `name` should be passed if the information is available to caller, otherwise a best guess is made. If both are specified `mime` takes precedence. * `encoding` and `encoding_errors` are used to handle text encoding. They are taken into consideration mostly only by pure-python backends which do not rely on CLI tools. Default to "utf8" and "strict" respectively. * `kwargs` are passed to the underlying backend.
def profile_url(obj, profile_app_name, profile_model_name):
    """Return the absolute URL of the user's profile, or '' when the
    profile content type or the profile object is missing."""
    try:
        content_type = ContentType.objects.get(
            app_label=profile_app_name,
            model=profile_model_name.lower()
        )
        profile = content_type.get_object_for_this_type(user=obj.user)
        return profile.get_absolute_url()
    except (ContentType.DoesNotExist, AttributeError):
        return ""
returns profile url of user
def update_bindings(self):
    """Re-try the binding of every dependency handler and report validity.

    :return: True iff every dependency handler validates afterwards.
    """
    with self._lock:
        all_valid = True
        for handler in self.get_handlers(handlers_const.KIND_DEPENDENCY):
            self.__safe_handler_callback(handler, "try_binding")
            all_valid &= self.__safe_handler_callback(
                handler, "is_valid", only_boolean=True, none_as_true=True
            )
        return all_valid
Updates the bindings of the given component :return: True if the component can be validated
def split_classes(X, y):
    """Split the rows of X by the class labels in y.

    Returns one float32 array per distinct label, ordered as np.unique(y).
    """
    groups = []
    for label in np.unique(y):
        groups.append(X[y == label, :].astype(np.float32))
    return groups
split samples in X by classes in y
def trace(function, *args, **kwargs):
    """Invoke *function*, printing its entry and exit (with arguments and
    result) when the module-level ``doTrace`` flag is True."""
    if doTrace:
        print("> " + function.__name__, args, kwargs)
    outcome = function(*args, **kwargs)
    if doTrace:
        print("< " + function.__name__, args, kwargs, "->", outcome)
    return outcome
Decorates a function by tracing the beginning and end of the function execution, if the doTrace global is True
def import_authors(self, tree):
    """Collect all authors used by 'post' items in the WXR tree and map
    each to a site author (the configured default author when set,
    otherwise a migrated one)."""
    self.write_out(self.style.STEP('- Importing authors\n'))
    post_authors = set()
    for item in tree.findall('channel/item'):
        if item.find('{%s}post_type' % WP_NS).text == 'post':
            post_authors.add(item.find(
                '{http://purl.org/dc/elements/1.1/}creator').text)
    self.write_out('> %i authors found.\n' % len(post_authors))
    if self.default_author:
        return {author: self.default_author for author in post_authors}
    return {author: self.migrate_author(author.replace(' ', '-'))
            for author in post_authors}
Retrieve all the authors used in posts and convert it to new or existing author and return the conversion.
def is_BF_hypergraph(self):
    """Indicate whether this is a BF-hypergraph (only B- and F-hyperedges,
    i.e. no hyperedge has both tail and head larger than one).

    :returns: bool -- True iff the hypergraph is a BF-hypergraph.
    """
    for edge_id in self._hyperedge_attributes:
        tail_size = len(self.get_hyperedge_tail(edge_id))
        head_size = len(self.get_hyperedge_head(edge_id))
        if tail_size > 1 and head_size > 1:
            return False
    return True
Indicates whether the hypergraph is a BF-hypergraph. A BF-hypergraph consists of only B-hyperedges and F-hyperedges. See "is_B_hypergraph" or "is_F_hypergraph" for more details. :returns: bool -- True iff the hypergraph is a BF-hypergraph.
def _run_post_configure_callbacks(self, configure_args):
    """Run all stored post-configure callbacks.

    Each callback receives the resulting configuration (immutable) and the
    arguments originally given to :meth:`configure`.  Single-shot callbacks
    are drained before running; return values are ignored.
    """
    frozen_config = ImmutableDict(self.config)
    repeating = copy.copy(self._post_configure_callbacks['multiple'])
    one_shot = copy.copy(self._post_configure_callbacks['single'])
    # Single-shot callbacks only ever fire once: reset before invoking.
    self._post_configure_callbacks['single'] = []
    for callback in repeating:
        callback(frozen_config, configure_args)
    for callback in one_shot:
        callback(frozen_config, configure_args)
Run all post configure callbacks we have stored. Functions are passed the configuration that resulted from the call to :meth:`configure` as the first argument, in an immutable form; and are given the arguments passed to :meth:`configure` for the second argument. Returns from callbacks are ignored in all fashion. Args: configure_args (list[object]): The full list of arguments passed to :meth:`configure`. Returns: None: Does not return anything.
def returns_annualized(returns, geometric=True, scale=None, expanding=False):
    """Return the annualized cumulative returns.

    :param returns: DataFrame or Series of periodic returns.
    :param geometric: link returns geometrically when True, else arithmetic.
    :param scale: annualization factor (None = resolve from data; scalar =
        use directly; string = resolve via the periodicity function).
    :param expanding: return an expanding series/frame instead of the final
        value.
    """
    scale = _resolve_periods_in_year(scale, returns)
    if not geometric:
        if expanding:
            return pd.expanding_mean(returns) * scale
        return returns.mean() * scale
    if expanding:
        count = pd.expanding_count(returns)
        return ((1. + returns).cumprod() ** (scale / count)) - 1.
    count = returns.count()
    return ((1. + returns).prod() ** (scale / count)) - 1.
return the annualized cumulative returns Parameters ---------- returns : DataFrame or Series geometric : link the returns geometrically scale: None or scalar or string (ie 12 for months in year), If None, attempt to resolve from returns If scalar, then use this as the annualization factor If string, then pass this to periodicity function to resolve annualization factor expanding: bool, default is False If True, return expanding series/frames. If False, return final result.
def update_hash(a_hash, mv):
    """Fold a MetricValue into *a_hash*: its labels (when present) and, for
    money values, a NUL separator followed by the utf-8 currency code.

    Args:
        a_hash: a secure hash object (e.g. from hashlib.md5).
        mv: the MetricValue instance to add.
    """
    if mv.labels:
        signing.add_dict_to_hash(a_hash, encoding.MessageToPyValue(mv.labels))
    money = mv.get_assigned_value(u'moneyValue')
    if money is None:
        return
    a_hash.update(b'\x00')
    a_hash.update(money.currencyCode.encode('utf-8'))
Adds ``mv`` to ``a_hash`` Args: a_hash (`Hash`): the secure hash, e.g created by hashlib.md5 mv (:class:`MetricValue`): the instance to add to the hash
def access(self, path, mode, dir_fd=None, follow_symlinks=None):
    """Check whether *path* exists and grants the permissions in *mode*
    (tested against the owner permission bits).

    Args:
        path: file path.
        mode: bitwise-OR of os.F_OK, os.R_OK, os.W_OK, os.X_OK.
        dir_fd: optional directory descriptor *path* is relative to (3.3+).
        follow_symlinks: when False, query the symlink itself (3.3+).

    Returns:
        True iff the file is accessible.
    """
    if follow_symlinks is not None and sys.version_info < (3, 3):
        raise TypeError("access() got an unexpected "
                        "keyword argument 'follow_symlinks'")
    path = self._path_with_dir_fd(path, self.access, dir_fd)
    try:
        st = self.stat(path, follow_symlinks=follow_symlinks)
    except OSError as os_error:
        if os_error.errno != errno.ENOENT:
            raise
        return False
    if is_root():
        # root can always write, so skip the write-bit check.
        mode &= ~os.W_OK
    owner_bits = (st.st_mode >> 6) & 7
    return (mode & owner_bits) == mode
Check if a file exists and has the specified permissions. Args: path: (str) Path to the file. mode: (int) Permissions represented as a bitwise-OR combination of os.F_OK, os.R_OK, os.W_OK, and os.X_OK. dir_fd: If not `None`, the file descriptor of a directory, with `path` being relative to this directory. New in Python 3.3. follow_symlinks: (bool) If `False` and `path` points to a symlink, the link itself is queried instead of the linked object. New in Python 3.3. Returns: bool, `True` if file is accessible, `False` otherwise.
def get_response(self):
    """Perform the configured HTTP request and return the raw response.

    The response is also stored on ``self.response``.
    """
    http_call = getattr(requests, self.request_method, None)
    # NOTE(review): this also consults self._request_method while the
    # lookup above uses self.request_method — presumably intentional
    # property/backing-field pairing; confirm against the class definition.
    if http_call is None and self._request_method is None:
        raise ValueError("A effective http request method must be set")
    if self.request_url is None:
        raise ValueError(
            "Fatal error occurred, the class property \"request_url\" is"
            "set to None, reset it with an effective url of dingtalk api."
        )
    result = http_call(self.request_url, **self.kwargs)
    self.response = result
    return result
Get the original response of requests
def find_whole_word(w):
    """Return a case-insensitive whole-word searcher for *w*.

    The returned callable scans a string and yields a MatchObject (the word
    is capture group 1), or None when the word never appears on a word
    boundary of its own.
    """
    pattern = re.compile(r'\b({0})\b'.format(w), flags=re.IGNORECASE)
    return pattern.search
Scan through string looking for a location where this word produces a match, and return a corresponding MatchObject instance. Return None if no position in the string matches the pattern; note that this is different from finding a zero-length match at some point in the string.
def _update_plotting_params(self, **kwargs):
    """Apply tool-driven changes to the display parameters.

    A changed scalar array invalidates the current actor (it is removed and
    flagged for re-rendering) and recomputes the valid data range; a new
    colormap is simply recorded.
    """
    new_scalars = kwargs.get('scalars', None)
    if new_scalars is not None:
        previous = self.display_params['scalars']
        self.display_params['scalars'] = new_scalars
        if previous != new_scalars:
            # Scalar array changed: drop the stale actor, keep the camera.
            self.plotter.subplot(*self.loc)
            self.plotter.remove_actor(self._data_to_update,
                                      reset_camera=False)
            self._need_to_update = True
            self.valid_range = self.input_dataset.get_data_range(new_scalars)
    new_cmap = kwargs.get('cmap', None)
    if new_cmap is not None:
        self.display_params['cmap'] = new_cmap
Some plotting parameters can be changed through the tool; this updates those plotting parameters.
def get_extra_context(site, ctx):
    """Populate *ctx* with site-wide template data: the site, its active
    feeds, lazy ``last_modified``/``last_checked`` timestamps, and the
    media/static URLs.

    The timestamps are exposed as zero-argument callables so the (possibly
    expensive) aggregation over all feeds only runs when a template renders
    them; the first call computes and caches both values back into *ctx*.
    """
    ctx['site'] = site
    ctx['feeds'] = feeds = site.active_feeds.order_by('name')

    def get_mod_chk(k):
        # Newest last_modified / last_checked over all feeds (None if none set).
        mod, chk = (
            (max(vals) if vals else None)
            for vals in (
                filter(None, it.imap(op.attrgetter(k), feeds))
                for k in ['last_modified', 'last_checked'] ) )
        # Fall back to the epoch when no feed was ever checked.
        chk = chk or datetime(1970, 1, 1, 0, 0, 0, 0, timezone.utc)
        ctx['last_modified'], ctx['last_checked'] = mod or chk, chk
        return ctx[k]

    for k in 'last_modified', 'last_checked':
        # Bind k as a default argument: a plain ``lambda: get_mod_chk(k)``
        # late-binds the loop variable, so BOTH entries would resolve
        # 'last_checked' (the loop's final value) when eventually called.
        ctx[k] = lambda k=k: get_mod_chk(k)

    ctx['media_url'] = ctx['static_url'] =\
        '{}feedjack/{}'.format(settings.STATIC_URL, site.template)
Returns extra data useful to the templates.
def format(self, obj, **kwargs):
    """Return the formatted representation of *obj* as a string, by
    capturing the output of ``self.fprint`` in an in-memory buffer."""
    buffer = StringIO()
    self.fprint(obj, stream=buffer, **kwargs)
    return buffer.getvalue()
Return the formatted representation of the object as a string.
def link(self, title, path):
    """Add a static file from the file system to the current section.

    :param title: display title for the entry.
    :param path: an open file object, or a path to open.
    """
    if not isinstance(path, file):
        path = salt.utils.files.fopen(path)
    self.__current_section.append({title: path})
Add a static file on the file system. :param title: :param path: :return:
def pWMWrite(fileHandle, pWM, alphabetSize=4):
    """Write *pWM* in standard PWM format (the inverse of pWMParser): one
    line per alphabet symbol, position values separated by spaces."""
    for symbol in xrange(0, alphabetSize):
        row = [str(pWM[position][symbol]) for position in xrange(0, len(pWM))]
        fileHandle.write("%s\n" % ' '.join(row))
Writes file in standard PWM format, is reverse of pWMParser
def dict_to_numpy_array(d): return fromarrays(d.values(), np.dtype([(str(k), v.dtype) for k, v in d.items()]))
Convert a dict of 1d array to a numpy recarray
def get_user(self, user_name=None):
    """Retrieve information about the given IAM user.

    :type user_name: string
    :param user_name: the user to look up; when omitted, the user is
        determined implicitly from the AWS Access Key ID signing the
        request.
    """
    params = {'UserName': user_name} if user_name else {}
    return self.get_response('GetUser', params)
Retrieve information about the specified user. If the user_name is not specified, the user_name is determined implicitly based on the AWS Access Key ID used to sign the request. :type user_name: string :param user_name: The name of the user to delete. If not specified, defaults to user making request.
def _uri_to_etext(cls, uri_ref):
    """Convert a text's RDF-graph URI into its integer etext identifier,
    or None when the trailing path component is not a valid etext id."""
    basename = os.path.basename(uri_ref.toPython())
    try:
        return validate_etextno(int(basename))
    except InvalidEtextIdException:
        return None
Converts the representation used to identify a text in the meta-data RDF graph to a human-friendly integer text identifier.
def _cancelScheduledUpgrade(self, justification=None) -> None:
    """Cancel the currently scheduled upgrade, if any.

    :param justification: optional reason for the cancellation; it is
        logged, recorded in the action log, and sent in the pool
        notification.
    """
    if not self.scheduledAction:
        return
    if justification is None:
        why_prefix = ", "
        why = "cancellation reason not specified"
    else:
        why_prefix = ": "
        why = justification
    event = self.scheduledAction
    logger.info("Cancelling upgrade {}"
                " of node {}"
                " of package {}"
                " to version {}"
                " scheduled on {}"
                "{}{}"
                .format(event.upgrade_id,
                        self.nodeName,
                        event.pkg_name,
                        event.version,
                        event.when,
                        why_prefix,
                        why))
    self._unscheduleAction()
    self._actionLog.append_cancelled(event)
    self._notifier.sendMessageUponPoolUpgradeCancel(
        "Upgrade of package {} on node '{}' to version {} "
        "has been cancelled due to {}"
        .format(event.pkg_name, self.nodeName, event.version, why))
Cancels the scheduled upgrade, if any, logging the cancellation and notifying the pool. :param justification: optional human-readable reason for the cancellation; when None, a generic "reason not specified" message is recorded
def read_chunk(stream):
    """Read the next chunk from *stream*, skipping whitespace outside of
    strings; a double-quoted string (escapes included) is read whole."""
    chunk = stream.read(1)
    while chunk in SKIP:
        chunk = stream.read(1)
    if chunk != "\"":
        return chunk
    chunk += stream.read(1)
    while not chunk.endswith("\""):
        # An escape character consumes the escaped character with it.
        chunk += stream.read(2) if chunk[-1] == ESCAPE else stream.read(1)
    return chunk
Ignore whitespace outside of strings. If we hit a string, read it in its entirety.
def choose(self, choose_from):
    """Pick the first of our preferred elements present in *choose_from*.

    :returns: an ImplementationChoice(key, value) pair.
    :raises LookupError: when none of our elements is available.
    """
    for preferred in self.elements:
        if preferred in choose_from:
            return ImplementationChoice(preferred, choose_from[preferred])
    raise LookupError(self.elements, choose_from.keys())
given a mapping of implementations choose one based on the current settings returns a key value pair
def get_info(self):
    """Return (reconstructed, site, site_type): whether the surface is
    reconstructed plus the primary and secondary adsorption site labels."""
    site, site_type = self.get_site()
    return self.is_reconstructed(), site, site_type
Return surface reconstruction as well as primary and secondary adsorption site labels
def vb_destroy_machine(name=None, timeout=10000):
    """Unregister a VirtualBox machine and delete all of its files.

    @param name: machine name
    @type name: str
    @param timeout: milliseconds to wait for the deletion to complete
    """
    hypervisor = vb_get_box()
    log.info('Destroying machine %s', name)
    target = hypervisor.findMachine(name)
    leftover_files = target.unregister(2)
    job = target.deleteConfig(leftover_files)
    job.waitForCompletion(timeout)
    log.info('Finished destroying machine %s', name)
Attempts to get rid of a machine and all its files from the hypervisor @param name: @type name: str @param timeout int timeout in milliseconds
def _update_font_style(self, font_style):
    """Sync the italic toggle widget: pressed iff *font_style* has the
    wx.FONTSTYLE_ITALIC bit set."""
    is_italic = (font_style & wx.FONTSTYLE_ITALIC) == wx.FONTSTYLE_ITALIC
    self.ToggleTool(wx.FONTFLAG_ITALIC, is_italic)
Updates font style widget Parameters ---------- font_style: Integer \tButton down iif font_style == wx.FONTSTYLE_ITALIC
def _dump_to_pages(dump): pos = 0 ret = [] start_tag = u"<page>\n" end_tag = u"</page>\n" while True: start_pos = dump.find(start_tag, pos) if start_pos == -1: break start_pos += len(start_tag) end_pos = dump.find(end_tag, start_pos) if end_pos == -1: break ret.append(dump[start_pos:end_pos]) pos = end_pos + len(end_tag) return ret
Extract pages from an xml dump. Args: dump: a unicode string Returns: a list of unicode strings
def _load_from_cache_if_available(self, key):
    """Short-circuit the tasklet with the cached Model instance for *key*,
    when present (a cached None means a known-missing entity).

    Args:
        key: Key instance.
    """
    if key not in self._cache:
        return
    cached = self._cache[key]
    if cached is None or cached._key == key:
        # tasklets.Return is how a tasklet yields its result.
        raise tasklets.Return(cached)
Returns a cached Model instance given the entity key if available. Args: key: Key instance. Returns: A Model instance if the key exists in the cache.
def _handle_command(self, command, env, args):
    """Dispatch *command* to its registered plugin, if any.

    Task-only plugins (and those with options) are suppressed when no task
    is active.  Returns True when a plugin handled the command, False
    otherwise; argument parsing may raise ``HelpBanner``.
    """
    plugin = registration.get_command_hook(command, env.task.active)
    if plugin and not env.task.active and (plugin.task_only or plugin.options):
        plugin = None
    if not plugin:
        return False
    if plugin.needs_root:
        registration.setup_sudo_access(plugin)
    parser = self._get_plugin_parser(plugin)
    plugin.execute(env, parser.parse_args(args))
    return True
Handles calling appropriate command plugin based on the arguments provided. `command` Command string. `env` Runtime ``Environment`` instance. `args` List of argument strings passed. Returns ``False`` if nothing handled. * Raises ``HelpBanner`` exception if mismatched command arguments.
def compile_pillar(self):
    """Return a future resolving to the pillar data from the master.

    Builds the v2 ``_pillar`` payload, performs an encrypted transfer over
    the channel, and validates that the master returned a dict.

    :raises SaltClientError: on transfer failure or malformed response.
    """
    load = {
        'id': self.minion_id,
        'grains': self.grains,
        'saltenv': self.opts['saltenv'],
        'pillarenv': self.opts['pillarenv'],
        'pillar_override': self.pillar_override,
        'extra_minion_data': self.extra_minion_data,
        'ver': '2',
        'cmd': '_pillar',
    }
    if self.ext:
        load['ext'] = self.ext
    try:
        ret_pillar = yield self.channel.crypted_transfer_decode_dictentry(
            load,
            dictkey='pillar',
        )
    except Exception:
        log.exception('Exception getting pillar:')
        raise SaltClientError('Exception getting pillar.')
    if not isinstance(ret_pillar, dict):
        msg = ('Got a bad pillar from master, type {0}, expecting dict: '
               '{1}').format(type(ret_pillar).__name__, ret_pillar)
        log.error(msg)
        raise SaltClientError(msg)
    raise tornado.gen.Return(ret_pillar)
Return a future which will contain the pillar data from the master
def show_image(kwargs, call=None):
    """Show the details of a VirtualBox image (salt-cloud ``-f`` function)."""
    if call != 'function':
        raise SaltCloudSystemExit(
            'The show_image action must be called with -f or --function.'
        )
    image_name = kwargs['image']
    log.info("Showing image %s", image_name)
    machine = vb_get_machine(image_name)
    ret = {machine["name"]: treat_machine_dict(machine)}
    del machine["name"]
    return ret
Show the details of an image
def _get_bundles_by_type(self, type):
    """Build the bundle-name -> file-list mapping for the requested type.

    Args:
        type: 'javascript' or 'css'.

    Each configured path is treated as a glob pattern relative to
    ``self.basedir``; matches within each pattern are sorted.
    """
    bundles = {}
    definitions = self.config.get(type)
    if definitions is None:
        return bundles
    for bundle_name, patterns in definitions.items():
        matched = []
        for relative in patterns:
            pattern = os.path.join(self.basedir, relative)
            asset_dir = os.path.dirname(pattern)
            candidates = [os.path.join(asset_dir, entry)
                          for entry in os.listdir(asset_dir)]
            matched.extend(sorted(fnmatch.filter(candidates, pattern)))
        bundles[bundle_name] = matched
    return bundles
Get a dictionary of bundles for requested type. Args: type: 'javascript' or 'css'
def DOM_node_to_XML(tree, xml_declaration=True):
    """Serialize a DOM tree to its Unicode representation.

    :param tree: an ``xml.etree.ElementTree.Element``
    :param xml_declaration: include the leading XML declaration line
    :returns: Unicode string
    """
    serialized = ET.tostring(tree, encoding='utf8', method='xml').decode('utf-8')
    if xml_declaration:
        return serialized
    # Strip the declaration that tostring() emits for encoding='utf8'.
    return serialized.split("<?xml version='1.0' encoding='utf8'?>\n")[1]
Prints a DOM tree to its Unicode representation. :param tree: the input DOM tree :type tree: an ``xml.etree.ElementTree.Element`` object :param xml_declaration: if ``True`` (default) prints a leading XML declaration line :type xml_declaration: bool :returns: Unicode object
def get(self, key):
    """Return the smallest timing recorded for *key*; 0 when the key is
    absent or its list is empty."""
    timings = self[key] if key in self else ()
    return min(timings) if len(timings) > 0 else 0
Return timings for `key`. Returns 0 if not present.
def apply_numpy_specials(self, copy=True):
    """Convert ISIS special pixel values to numpy special values.

    ======= =======
    Isis    Numpy
    ======= =======
    Null    nan
    < Min   -inf
    > Max   inf
    ======= =======

    Parameters
    ----------
    copy : bool
        If True (default), convert a float64 copy of ``self.data`` and
        leave the original untouched; otherwise convert in place,
        re-binding ``self.data`` to float64 when needed.

    Returns
    -------
    numpy.ndarray
        Float64 array with special values replaced.
    """
    if copy:
        data = self.data.astype(numpy.float64)
    elif self.data.dtype != numpy.float64:
        # In-place requested but wrong dtype: convert once and keep it.
        data = self.data = self.data.astype(numpy.float64)
    else:
        data = self.data
    # Order matters: Null pixels become nan first, and nan comparisons are
    # False, so the Min/Max passes leave them alone.
    data[data == self.specials['Null']] = numpy.nan
    # numpy.NINF was removed in NumPy 2.0; -numpy.inf is the identical
    # value on every version.
    data[data < self.specials['Min']] = -numpy.inf
    data[data > self.specials['Max']] = numpy.inf
    return data
Convert isis special pixel values to numpy special pixel values. ======= ======= Isis Numpy ======= ======= Null nan Lrs -inf Lis -inf His inf Hrs inf ======= ======= Parameters ---------- copy : bool [True] Whether to apply the new special values to a copy of the pixel data and leave the original unaffected Returns ------- Numpy Array A numpy array with special values converted to numpy's nan, inf, and -inf
def with_log(func):
    """Decorator that injects a named logger into *func* as the ``log``
    keyword and traces entry/exit at debug level.

    A caller-supplied ``log`` keyword takes precedence over the default
    ``logging.getLogger(func.__name__)``.

    :param func: Function to decorate.
    :return: Decorated function.
    :rtype: function
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        decorator_logger = logging.getLogger('@with_log')
        decorator_logger.debug('Entering %s() function call.', func.__name__)
        # pop(), not get(): leaving 'log' in kwargs while also passing
        # log=log would raise "got multiple values for keyword argument".
        log = kwargs.pop('log', logging.getLogger(func.__name__))
        try:
            ret = func(log=log, *args, **kwargs)
        finally:
            decorator_logger.debug('Leaving %s() function call.', func.__name__)
        return ret
    return wrapper
Automatically adds a named logger to a function upon function call. :param func: Function to decorate. :return: Decorated function. :rtype: function
def guess_xml_encoding(self, content):
    r"""Guess the encoding from an XML header declaration.

    :param content: xml content
    :rtype: str or None (lower-cased encoding name, or None/falsy when the
        declaration does not match)
    """
    match = self.__regex['xml_encoding'].match(content)
    return match and match.group(1).lower()
r"""Guess encoding from xml header declaration. :param content: xml content :rtype: str or None
def parse_s2bins(s2bins):
    """Parse a ggKbase scaffold-to-bin mapping.

    Lines look like ``scaffold bin [genome name...]``; bins containing
    'UNK' are skipped.  Returns (scaffold->bin, bin->scaffolds), where the
    bin key is ``"bin\tgenome"`` ('n/a' when no genome name is given).
    """
    scaffold2bin = {}
    bin2scaffolds = {}
    for raw in s2bins:
        fields = raw.strip().split()
        scaffold, bin_id = fields[0], fields[1]
        if 'UNK' in bin_id:
            continue
        genome = ' '.join(fields[2:]) if len(fields) > 2 else 'n/a'
        bin_key = '%s\t%s' % (bin_id, genome)
        scaffold2bin[scaffold] = bin_key
        bin2scaffolds.setdefault(bin_key, []).append(scaffold)
    return scaffold2bin, bin2scaffolds
parse ggKbase scaffold-to-bin mapping - scaffolds-to-bins and bins-to-scaffolds
def __FinalUrlValue(self, value, field):
    """Encode *value* for use in a URL.

    Bytes fields are urlsafe-base64 encoded; text is encoded to utf8
    bytes; raw bytes are decoded to text; datetimes become ISO-8601;
    anything else passes through unchanged.
    """
    if isinstance(field, messages.BytesField) and value is not None:
        return base64.urlsafe_b64encode(value)
    if isinstance(value, six.text_type):
        return value.encode('utf8')
    if isinstance(value, six.binary_type):
        return value.decode('utf8')
    if isinstance(value, datetime.datetime):
        return value.isoformat()
    return value
Encode value for the URL, using field to skip encoding for bytes.
def open(self):
    """Ask the user for a session file via an open-file dialog and, when
    one is chosen, load it into a new window."""
    chosen = self.dialogs.getOpenFileName(filter="*.%s" % self.FTYPE)
    if chosen:
        self.new(chosen)
open a session to define in a dialog in an extra window
def facilityNetToMs():
    """Build a FACILITY (network to MS) packet, 3GPP TS 04.08 section 9.3.9.1."""
    return TpPd(pd=0x3) / MessageType(mesType=0x3a) / Facility()
FACILITY Section 9.3.9.1
def growth_volatility(eqdata, **kwargs):
    """Return the volatility of growth for *eqdata*.

    Applies directly to a frame like that returned by
    ``pynance.data.retrieve.get``.

    :param eqdata: DataFrame containing the `selection` column.
    :keyword window: volatility window, default 20.
    :keyword selection: source column, default 'Adj Close'.
    :keyword outputcol: output column name, default 'Growth Risk'.
    """
    growth_frame = simple.growth(
        eqdata, selection=kwargs.get('selection', 'Adj Close'))
    return volatility(
        growth_frame,
        outputcol=kwargs.get('outputcol', 'Growth Risk'),
        window=kwargs.get('window', 20))
Return the volatility of growth. Note that, like :func:`pynance.tech.simple.growth` but in contrast to :func:`volatility`, :func:`growth_volatility` applies directly to a dataframe like that returned by :func:`pynance.data.retrieve.get`, not necessarily to a single-column dataframe. Parameters ---------- eqdata : DataFrame Data from which to extract growth volatility. An exception will be raised if `eqdata` does not contain a column 'Adj Close' or an optional name specified by the `selection` parameter. window : int, optional Window on which to calculate volatility. Defaults to 20. selection : str, optional Column of eqdata on which to calculate volatility of growth. Defaults to 'Adj Close' outputcol : str, optional Column to use for output. Defaults to 'Growth Risk'. Returns --------- out : DataFrame Dataframe showing the volatility of growth over the specified `window`.
def config_oauth(app):
    """Configure OAuth support: instantiate a client for every provider
    that has an ``OAUTH_<NAME>`` entry in the app config, reusing an
    already-registered remote app when present."""
    for provider in PROVIDERS:
        config = app.config.get('OAUTH_%s' % provider.upper())
        if not config:
            continue
        if provider in oauth.remote_apps:
            remote_app = oauth.remote_apps[provider]
        else:
            remote_app = oauth.remote_app(provider, **config)
        client_class = CLIENTS.get(provider)
        client_class(app, remote_app)
Configure oauth support.
def flatten_path(path, flatten_slashes=False):
    """Flatten an absolute URL path by resolving its dot segments.

    Unlike :func:`urllib.parse.urljoin`, '.' and '..' segments are always
    removed.  The result always has a leading slash.

    Arguments:
        path (str): The URL path.
        flatten_slashes (bool): If True, consecutive slashes collapse.
    """
    if not path or path == '/':
        return '/'
    segments = (path[1:] if path[0] == '/' else path).split('/')
    resolved = collections.deque()
    for segment in segments:
        if segment == '.' or (flatten_slashes and not segment):
            continue
        if segment == '..':
            if resolved:
                resolved.pop()
        else:
            resolved.append(segment)
    # Preserve a trailing slash, or synthesize one for an empty result.
    if flatten_slashes and path.endswith('/') or not len(resolved):
        resolved.append('')
    resolved.appendleft('')
    return '/'.join(resolved)
Flatten an absolute URL path by removing the dot segments. :func:`urllib.parse.urljoin` has some support for removing dot segments, but it is conservative and only removes them as needed. Arguments: path (str): The URL path. flatten_slashes (bool): If True, consecutive slashes are removed. The path returned will always have a leading slash.
def field_to_long(value):
    """Coerce *value* to a long where possible: integers pass through,
    hex strings are decoded; anything else yields None.

    :param value:
    :return: long or None
    """
    if isinstance(value, (int, long)):
        return long(value)
    if isinstance(value, basestring):
        return bytes_to_long(from_hex(value))
    return None
Converts given value to long if possible, otherwise None is returned. :param value: :return:
def verify_signature(self, addr):
    """Verify that this object's signature was produced by *addr* over the
    canonical plaintext."""
    reencoded_addr = virtualchain.address_reencode(addr)
    payload = self.get_plaintext_to_sign()
    return verify(reencoded_addr, payload, self.sig)
Given an address, verify whether or not it was signed by it
def fail(self, message, status=500, **kw):
    """Set an error *status* on the response and return a JSON error
    object; extra keyword arguments are merged into the payload."""
    self.request.response.setStatus(status)
    payload = {"success": False, "errors": message, "status": status}
    payload.update(kw)
    return payload
Set a JSON error object and a status to the response
def look_up(self, **keys: Dict[InstanceName, ScalarValue]) -> "ArrayEntry":
    """Return the list entry whose keys match the keyword arguments.

    Raises:
        InstanceValueError: if the receiver's value is not a YANG list.
        NonexistentInstance: if no entry with matching keys exists.
    """
    if not isinstance(self.schema_node, ListNode):
        raise InstanceValueError(self.json_pointer(), "lookup on non-list")
    try:
        for index in range(len(self.value)):
            candidate = self.value[index]
            if all(candidate[name] == keys[name] for name in keys):
                return self._entry(index)
        raise NonexistentInstance(self.json_pointer(), "entry lookup failed")
    except KeyError:
        # A candidate entry is missing one of the key leaves.
        raise NonexistentInstance(self.json_pointer(), "entry lookup failed") from None
    except TypeError:
        raise InstanceValueError(self.json_pointer(), "lookup on non-list") from None
Return the entry with matching keys. Args: keys: Keys and values specified as keyword arguments. Raises: InstanceValueError: If the receiver's value is not a YANG list. NonexistentInstance: If no entry with matching keys exists.
def setSignalHeaders(self, signalHeaders):
    """Set the header dict for every signal, then refresh the file header.

    Parameters
    ----------
    signalHeaders : array_like
        One header dict per channel (indexed 0..n_channels-1) with keys
        such as 'label', 'dimension', 'sample_rate', 'physical_max',
        'physical_min', 'digital_max' and 'digital_min'.
    """
    # NOTE(review): np.arange yields numpy integer indices; assumed
    # interchangeable with plain ints for indexing self.channels —
    # confirm if self.channels is a dict keyed by int.
    for edfsignal in np.arange(self.n_channels):
        self.channels[edfsignal] = signalHeaders[edfsignal]
    self.update_header()
Sets the parameter for all signals Parameters ---------- signalHeaders : array_like containing dict with 'label' : str channel label (string, <= 16 characters, must be unique) 'dimension' : str physical dimension (e.g., mV) (string, <= 8 characters) 'sample_rate' : int sample frequency in hertz 'physical_max' : float maximum physical value 'physical_min' : float minimum physical value 'digital_max' : int maximum digital value (-2**15 <= x < 2**15) 'digital_min' : int minimum digital value (-2**15 <= x < 2**15)
def socket_close(self):
    """Close our socket and mark it invalid (no-op when already closed)."""
    if self.sock == NC.INVALID_SOCKET:
        return
    self.sock.close()
    self.sock = NC.INVALID_SOCKET
Close our socket.
def _cosmoid_request(self, resource, cosmoid, **kwargs):
    """Issue a generic API request whose only required parameter is
    ``cosmoid``.

    Extra keyword arguments are merged into the request parameters.
    """
    request_params = dict(cosmoid=cosmoid)
    request_params.update(kwargs)
    return self.make_request(resource, request_params)
Maps to the Generic API method for requests whose only parameter is ``cosmoid``
def iter_entries(self):
    """Generate an |_IfdEntry| instance for each entry in the directory.

    Entries are 12 bytes each and start 2 bytes past the directory
    offset (after the entry-count short).
    """
    first_entry_offset = self._offset + 2
    for entry_idx in range(self._entry_count):
        yield _IfdEntryFactory(self._stream_rdr, first_entry_offset + entry_idx * 12)
Generate an |_IfdEntry| instance corresponding to each entry in the directory.
def _response_value(self, response, json=True):
    """Parse the HTTP response into the CloudStack payload value.

    :param response: a requests-style response object
    :param json: when True, the body must be JSON (or text/javascript)
    :return: the single top-level JSON value, or the raw text body
    :raises CloudStackException: on unexpected content type, malformed
        JSON, or any non-200 status code
    """
    if json:
        contentType = response.headers.get("Content-Type", "")
        # Some CloudStack endpoints answer with text/javascript; accept it.
        if not contentType.startswith(("application/json", "text/javascript")):
            if response.status_code == 200:
                raise CloudStackException(
                    "JSON (application/json) was expected, got {!r}"
                    .format(contentType), response=response)
            raise CloudStackException(
                "HTTP {0.status_code} {0.reason}"
                .format(response),
                "Make sure endpoint URL {!r} is correct."
                .format(self.endpoint),
                response=response)
        try:
            data = response.json()
        except ValueError as e:
            raise CloudStackException(
                "HTTP {0.status_code} {0.reason}"
                .format(response),
                "{0!s}. Malformed JSON document".format(e),
                response=response)
        # The body is a single-key envelope; unwrap it.
        [key] = data.keys()
        data = data[key]
    else:
        data = response.text
    if response.status_code != 200:
        raise CloudStackException(
            "HTTP {0} response from CloudStack".format(
                response.status_code),
            data,
            response=response)
    return data
Parses the HTTP response as the CloudStack value. It throws an exception if the server didn't answer with a 200.
def level_names(self):
    """Return MultiIndex level names, or None when this IndexVariable
    does not wrap a MultiIndex."""
    index = self.to_index()
    return index.names if isinstance(index, pd.MultiIndex) else None
Return MultiIndex level names or None if this IndexVariable has no MultiIndex.
def _get_enum(self, source, bitarray):
    """Decode an enumerated field described by the XML *source* element
    from *bitarray*.

    The raw value is mapped to a description via an exact
    ``<item value="...">`` child, falling back to a range item when no
    exact match exists.

    :return: one-entry dict keyed by the field's shortcut
    """
    raw_value = self._get_raw(source, bitarray)
    # Exact <item> match first; otherwise a range item covering raw_value.
    value_desc = source.find('item', {'value': str(raw_value)}) or self._get_rangeitem(source, raw_value)
    return {
        source['shortcut']: {
            'description': source.get('description'),
            'unit': source.get('unit', ''),
            # Range-item descriptions may embed a {value} placeholder.
            'value': value_desc['description'].format(value=raw_value),
            'raw_value': raw_value,
        }
    }
Get enum value, based on the data in XML
def get_statements(self):
    """Process reader output to produce INDRA Statements.

    Every interaction in every reader-output entry is handed to
    ``self._process_interaction`` along with the source text, PMID and
    extra annotations.
    """
    for source_id, output in self.reader_output.items():
        text = output['text']
        for interaction in output['interactions']:
            self._process_interaction(source_id, interaction, text,
                                      self.pmid, self.extra_annotations)
Process reader output to produce INDRA Statements.
def _known_populations(row, pops):
    """Return population databases where the variant is common.

    A population counts when any of its allele-frequency columns in
    *row* exceeds the 1% cutoff.

    :param row: mapping of annotation column name -> frequency (or falsy)
    :param pops: iterable of available column names
    :return: sorted list of matching population labels
    """
    cutoff = 0.01
    checks = [("esp", "af_esp_all"), ("1000g", "af_1kg_all"),
              ("exac", "af_exac_all"), ("anypop", "max_aaf_all")]
    found = set()
    for label, prefix in checks:
        for column in pops:
            if not column.startswith(prefix):
                continue
            freq = row[column]
            if freq and freq > cutoff:
                found.add(label)
    return sorted(found)
Find variants present in substantial frequency in population databases.
def new(cls, ns_path, script, campaign_dir, runner_type='Auto', overwrite=False, optimized=True, check_repo=True):
    """Create a new simulation campaign from an ns-3 installation.

    If a campaign database already exists at *campaign_dir* (and
    *overwrite* is False) and was created for the same script, that
    campaign is loaded and returned instead.

    Args:
        ns_path (str): path to the ns-3 installation to employ.
        script (str): ns-3 script executed to run simulations.
        campaign_dir (str): directory holding the campaign database.
        runner_type (str): SimulationRunner implementation to use;
            'Auto' picks the best available.
        overwrite (bool): wipe and recreate an existing campaign_dir.
        optimized (bool): configure the runner for an optimized ns-3 build.
        check_repo (bool): require a git repo at ns_path and record the
            current commit for reproducibility.
    """
    ns_path = os.path.abspath(ns_path)
    campaign_dir = os.path.abspath(campaign_dir)
    if Path(campaign_dir).exists() and not overwrite:
        # Reuse the existing campaign when it targets the same script.
        manager = CampaignManager.load(campaign_dir, ns_path,
                                       runner_type=runner_type,
                                       optimized=optimized,
                                       check_repo=check_repo)
        if manager.db.get_script() == script:
            return manager
        else:
            del manager
    runner = CampaignManager.create_runner(ns_path, script,
                                           runner_type=runner_type,
                                           optimized=optimized)
    params = runner.get_available_parameters()
    commit = ""
    if check_repo:
        from git import Repo, exc
        # Record the exact ns-3 commit the campaign is run against.
        commit = Repo(ns_path).head.commit.hexsha
    db = DatabaseManager.new(script=script,
                             params=params,
                             commit=commit,
                             campaign_dir=campaign_dir,
                             overwrite=overwrite)
    return cls(db, runner, check_repo)
Create a new campaign from an ns-3 installation and a campaign directory. This method will create a DatabaseManager, which will install a database in the specified campaign_dir. If a database is already available at the ns_path described in the specified campaign_dir and its configuration matches config, this instance is used instead. If the overwrite argument is set to True instead, the specified directory is wiped and a new campaign is created in its place. Furthermore, this method will initialize a SimulationRunner, of type specified by the runner_type parameter, which will be locked on the ns-3 installation at ns_path and set up to run the desired script. Finally, note that creation of a campaign requires a git repository to be initialized at the specified ns_path. This will allow SEM to save the commit at which the simulations are run, enforce reproducibility and avoid mixing results coming from different versions of ns-3 and its libraries. Args: ns_path (str): path to the ns-3 installation to employ in this campaign. script (str): ns-3 script that will be executed to run simulations. campaign_dir (str): path to the directory in which to save the simulation campaign database. runner_type (str): implementation of the SimulationRunner to use. Value can be: SimulationRunner (for running sequential simulations locally), ParallelRunner (for running parallel simulations locally), GridRunner (for running simulations using a DRMAA-compatible parallel task scheduler). Use Auto to automatically pick the best runner. overwrite (bool): whether to overwrite already existing campaign_dir folders. This deletes the directory if and only if it only contains files that were detected to be created by sem. optimized (bool): whether to configure the runner to employ an optimized ns-3 build.
def nodeToXML(nodeObject):
    """Build an XML representation of a Django node object from the CODA
    store.

    Child elements are emitted in a fixed order; ``lastChecked`` is only
    included when the node has been checked.
    """
    xmlRoot = etree.Element(NODE + "node", nsmap=NODE_NSMAP)

    def add_child(tag, text):
        # Append a namespaced child element with the given text content.
        child = etree.SubElement(xmlRoot, NODE + tag)
        child.text = text

    add_child("name", nodeObject.node_name)
    add_child("url", nodeObject.node_url)
    add_child("path", nodeObject.node_path)
    add_child("capacity", str(nodeObject.node_capacity))
    add_child("size", str(nodeObject.node_size))
    if nodeObject.last_checked:
        add_child("lastChecked", nodeObject.last_checked.strftime(TIME_FORMAT_STRING))
    return xmlRoot
Take a Django node object from our CODA store and make an XML representation
def visit(self, node):
    """Rewrite *node* when it matches a known pattern, then continue the
    normal transformer traversal.

    Each matching pattern's replacement template is instantiated with
    the placeholders captured during matching; ``self.update`` is set so
    callers know a rewrite happened. Multiple patterns may apply in
    sequence to the same node.
    """
    for pattern, replace in know_pattern:
        check = Check(node, dict())
        if check.visit(pattern):
            # Fill the replacement template with the captured sub-trees.
            node = PlaceholderReplace(check.placeholders).visit(replace())
            self.update = True
    return super(PatternTransform, self).visit(node)
Try to replace the node if it matches a known pattern; otherwise keep going.
def write_badge(self, file_path, overwrite=False):
    """Write the badge SVG to *file_path*.

    A ``.svg`` extension is appended when missing. Raises ``Exception``
    when *file_path* denotes a directory, or when the target exists and
    *overwrite* is False.
    """
    if file_path.endswith('/'):
        raise Exception('File location may not be a directory.')
    target = os.path.abspath(file_path)
    if not target.lower().endswith('.svg'):
        target += '.svg'
    if os.path.exists(target) and not overwrite:
        raise Exception('File "{}" already exists.'.format(target))
    with open(target, mode='w') as out:
        out.write(self.badge_svg_text)
Write badge to file.
def load(self, profile_args):
    """Load provided CLI args into this profile.

    Args:
        profile_args (dict): args in key/value format; each pair is
            registered via ``self.add``.
    """
    for item in profile_args.items():
        self.add(*item)
Load provided CLI Args. Args: profile_args (dict): Dictionary of args in key/value format.
def GetFeedItemIdsForCampaign(campaign_feed):
    """Get the Feed Item Ids used by a campaign through a given Campaign
    Feed.

    Args:
        campaign_feed: the Campaign Feed we are retrieving Feed Item Ids
            from.

    Returns:
        A set of Feed Item IDs; empty when the matching function does
        not select feed items by ID.
    """
    try:
        lhs = campaign_feed['matchingFunction']['lhsOperand']
    except KeyError:
        return set()
    # The feed only selects by item ID when the left-hand operand is a
    # request-context operand over FEED_ITEM_ID combined with IN.
    if not (lhs and lhs[0]['FunctionArgumentOperand.Type'] == 'RequestContextOperand'):
        return set()
    if lhs[0]['contextType'] != 'FEED_ITEM_ID':
        return set()
    if campaign_feed['matchingFunction']['operator'] != 'IN':
        return set()
    return {argument['longValue']
            for argument in campaign_feed['matchingFunction']['rhsOperand']
            if argument['xsi_type'] == 'ConstantOperand'}
Gets the Feed Item Ids used by a campaign through a given Campaign Feed. Args: campaign_feed: the Campaign Feed we are retrieving Feed Item Ids from. Returns: A set of Feed Item IDs.
def get_default_gwf_api():
    """Return the name of the preferred (first importable) GWF library.

    Raises:
        ImportError: when none of the known third-party GWF APIs can be
            imported.
    """
    for api in APIS:
        try:
            import_gwf_library(api)
        except ImportError:
            continue
        return api
    raise ImportError("no GWF API available, please install a third-party GWF "
                      "library ({}) and try again".format(', '.join(APIS)))
Return the preferred GWF library Examples -------- If you have |LDAStools.frameCPP|_ installed: >>> from gwpy.timeseries.io.gwf import get_default_gwf_api >>> get_default_gwf_api() 'framecpp' Or, if you don't have |lalframe|_: >>> get_default_gwf_api() 'lalframe' Otherwise: >>> get_default_gwf_api() ImportError: no GWF API available, please install a third-party GWF library (framecpp, lalframe) and try again
def concat_padded(base, *args):
    """Concatenate *base* with each argument using '_': strings are
    appended as-is, numbers as zero-padded 4-digit values offset by one.
    """
    ret = base
    for item in args:
        suffix = item if is_string(item) else "%04i" % (item + 1)
        ret = "%s_%s" % (ret, suffix)
    return ret
Concatenate string and zero-padded 4 digit number
def _pool_event_refresh_cb(conn, pool, opaque):
    """Storage pool refresh events handler.

    Forwards the pool's name/uuid and the configured event tag to the
    salt event bus.
    """
    payload = {
        'pool': {
            'name': pool.name(),
            'uuid': pool.UUIDString(),
        },
        'event': opaque['event'],
    }
    _salt_send_event(opaque, conn, payload)
Storage pool refresh events handler
def getanymentions(idf, anidfobject):
    """Find every object in *idf* that mentions *anidfobject* by name.

    The name (second field of the object) is compared case-insensitively
    against every string field of every object in the IDF.
    """
    target = anidfobject.obj[1].upper()
    matches = []
    for key in idfobjectkeys(idf):
        for candidate in idf.idfobjects[key.upper()]:
            fields = [field.upper() for field in candidate.obj
                      if isinstance(field, basestring)]
            if target in fields:
                matches.append(candidate)
    return matches
Find out if the idfobject is mentioned in any object anywhere
def remove_external_data_field(tensor, field_key):
    """Remove a field from a Tensor's external_data key-value store.

    Modifies the tensor object in place.

    The original implementation deleted entries while enumerating the
    container, which skips the element following a match (and could miss
    duplicate keys). Iterating indices in reverse makes deletion safe.

    @params
    tensor: Tensor object from which the field will be removed
    field_key: The key of the field to be removed
    """
    for i in reversed(range(len(tensor.external_data))):
        if tensor.external_data[i].key == field_key:
            del tensor.external_data[i]
Remove a field from a Tensor's external_data key-value store. Modifies tensor object in place. @params tensor: Tensor object from which value will be removed field_key: The key of the field to be removed
def encode_command(*args, buf=None):
    """Encode arguments into a redis bulk-strings array.

    :param buf: optional bytearray to append to; a new one is created
        when omitted.
    :raises TypeError: if any arg is not bytearray, bytes, float, int,
        or str.
    :return: the bytearray holding the encoded command
    """
    out = bytearray() if buf is None else buf
    out.extend(b'*%d\r\n' % len(args))
    try:
        for arg in args:
            barg = _converters[type(arg)](arg)
            out.extend(b'$%d\r\n%s\r\n' % (len(barg), barg))
    except KeyError:
        raise TypeError("Argument {!r} expected to be of bytearray, bytes,"
                        " float, int, or str type".format(arg))
    return out
Encodes arguments into redis bulk-strings array. Raises TypeError if any of args not of bytearray, bytes, float, int, or str type.
def capabilities(self):
    """Return the list of system capabilities.

    Fetches the capabilities endpoint and extracts the ``capabilities``
    attribute from the parsed Atom entry content.

    :return: A ``list`` of capabilities.
    """
    response = self.get(PATH_CAPABILITIES)
    return _load_atom(response, MATCH_ENTRY_CONTENT).capabilities
Returns the list of system capabilities. :return: A ``list`` of capabilities.
async def start_authentication(self):
    """Start the authentication (pairing) process.

    Triggers the device to show the expected PIN on screen.

    :raises DeviceAuthenticationError: if the pair-pin-start request
        does not return HTTP 200.
    """
    _, code = await self.http.post_data(
        'pair-pin-start', headers=_AIRPLAY_HEADERS)
    if code != 200:
        raise DeviceAuthenticationError('pair start failed')
Start the authentication process. This method will show the expected PIN on screen.
async def get_person(self, id_):
    """Retrieve person data by ID.

    Arguments:
        id_ (:py:class:`int`): The person's TMDb ID.

    Returns:
        :py:class:`~.Person`: The requested person, with their movie
        credits appended to the API response.
    """
    data = await self._get_person_json(
        id_, OrderedDict(append_to_response='movie_credits')
    )
    return Person.from_json(data, self.config['data'].get('images'))
Retrieve person data by ID. Arguments: id_ (:py:class:`int`): The person's TMDb ID. Returns: :py:class:`~.Person`: The requested person.
def __get_payload(self, uuid, failed):
    """Fetch the payload for *uuid* from the publish store.

    On the first failure the UUID is added to *failed* so the read is
    retried later; on the second failure (UUID already in *failed*) the
    entry is discarded.

    Fix: the retry log message read "will try agan" — corrected to
    "will try again".

    :param uuid: key into ``self.publish_uuid_store``
    :param failed: mutable set tracking UUIDs that already failed once
    :return: the stored payload, or None when it could not be loaded
    """
    try:
        return self.publish_uuid_store[uuid]
    except Exception as exc:
        msg = "Failed to load payload from publish store for UUID %s, %s: %s"
        if uuid in failed:
            log.error(msg, uuid, "discarding", str(exc))
            self.__discard_publish_uuid(uuid, failed)
        else:
            log.error(msg, uuid, "will try again", str(exc))
            failed.add(uuid)
        return None
Retry reading a message from the publish_uuid_store once, delete on the second failure.
def remove_port_profile_to_delete(self, profile_name, device_id):
    """Remove a pending port-profile delete record from the table.

    Deletes the ``PortProfileDelete`` row matching *profile_name* and
    *device_id* inside a nested transaction.
    """
    with self.session.begin(subtransactions=True):
        self.session.query(ucsm_model.PortProfileDelete).filter_by(
            profile_id=profile_name, device_id=device_id).delete()
Removes port profile to be deleted from table.
def work_experience(self, working_start_age: int = 22) -> int:
    """Get years of work experience.

    :param working_start_age: Age at which the person started working.
    :return: Non-negative number of years, based on the stored age
        (generating a fresh age when none is stored yet).
    """
    current_age = self._store['age']
    if not current_age:
        current_age = self.age()
    return max(current_age - working_start_age, 0)
Get a work experience. :param working_start_age: Age at which the person starts to work. :return: Depends on the previously generated age.
def start_pinging(self) -> None:
    """Start sending periodic pings to keep the connection alive.

    No-op when ping_interval is zero or negative. Records the current
    IOLoop time as both last ping and last pong, then schedules
    ``periodic_ping`` (interval is configured in seconds;
    PeriodicCallback expects milliseconds, hence the * 1000).
    """
    assert self.ping_interval is not None
    if self.ping_interval > 0:
        self.last_ping = self.last_pong = IOLoop.current().time()
        self.ping_callback = PeriodicCallback(
            self.periodic_ping, self.ping_interval * 1000
        )
        self.ping_callback.start()
Start sending periodic pings to keep the connection alive
def update_user_type(self):
    """Set ``self.user_type`` to 'tutor' or 'student' based on the
    checked radio button, then accept the dialog.

    The attribute is left unchanged when neither button is checked.
    """
    for button, role in ((self.rb_tutor, 'tutor'),
                         (self.rb_student, 'student')):
        if button.isChecked():
            self.user_type = role
            break
    self.accept()
Set ``self.user_type`` to either 'tutor' or 'student' based on which radio button is selected, then accept the dialog.
def cli(ctx):
    """Weights & Biases CLI entry point.

    Run "wandb docs" for full documentation. When invoked without a
    subcommand, prints the top-level help text.
    """
    wandb.try_to_set_up_global_logging()
    if ctx.invoked_subcommand is None:
        click.echo(ctx.get_help())
Weights & Biases. Run "wandb docs" for full documentation.
def write_supercells_with_displacements(supercell, cells_with_disps, filename="geo.gen"):
    """Write the perfect supercell and one file per displaced supercell.

    Args:
        supercell: perfect supercell, written to ``filename + "S"``.
        cells_with_disps: supercells with displaced atoms, written to
            ``filename + "S-001"``, ``"S-002"``, ...
        filename: root filename.
    """
    write_dftbp(filename + "S", supercell)
    for index, displaced in enumerate(cells_with_disps, start=1):
        write_dftbp("{}S-{:03d}".format(filename, index), displaced)
Writes perfect supercell and supercells with displacements Args: supercell: perfect supercell cells_with_disps: supercells with displaced atoms filename: root-filename
def addrs_for_hash(self, h):
    """Yield addresses whose stored expressions currently hash to *h*.

    The reverse mapping may be stale: entries whose object hash no
    longer matches, or whose address no longer exists, are collected
    and pruned from the mapping after iteration.

    NOTE(review): the pruning step only runs when the generator is
    fully consumed — partial consumption leaves stale entries in place.
    """
    if h not in self._hash_mapping:
        return
    self._mark_updated_mapping(self._hash_mapping, h)
    to_discard = set()
    for e in self._hash_mapping[h]:
        try:
            if h == hash(self[e].object):
                yield e
            else:
                # Hash mismatch: address now holds a different object.
                to_discard.add(e)
        except KeyError:
            # Address no longer present.
            to_discard.add(e)
    self._hash_mapping[h] -= to_discard
Returns addresses that contain expressions that contain a variable with the hash of `h`.
def Attach(self, pid):
    """Attach to the process with the given pid.

    If already attached to a running process, prompts the user (via
    ``raw_input`` — Python 2) whether to detach first; aborts on a
    negative answer. Plugin positions are reset before re-initializing
    the inferior on the new pid.
    """
    if self.inferior.is_running:
        answer = raw_input('Already attached to process ' + str(self.inferior.pid) + '. Detach? [y]/n ')
        if answer and answer != 'y' and answer != 'yes':
            return None
        self.Detach()
    for plugin in self.plugins:
        plugin.position = None
    self.inferior.Reinit(pid)
Attach to the process with the given pid.
def handle_logout_response(self, response, sign_alg=None, digest_alg=None):
    """Handle a Logout response.

    :param response: A response.Response instance
    :param sign_alg: signature algorithm for follow-up logout requests
        (a per-request stored value takes precedence)
    :param digest_alg: digest algorithm for follow-up logout requests
    :return: 4-tuple of (session_id of the last sent logout request,
        response message, response headers and message)
    """
    logger.info("state: %s", self.state)
    # Look up (and consume) the request state this response answers.
    status = self.state[response.in_response_to]
    logger.info("status: %s", status)
    issuer = response.issuer()
    logger.info("issuer: %s", issuer)
    del self.state[response.in_response_to]
    if status["entity_ids"] == [issuer]:
        # Last remaining entity responded: finish with a local logout.
        self.local_logout(decode(status["name_id"]))
        return 0, "200 Ok", [("Content-type", "text/html")], []
    else:
        # More entities remain: continue the logout round-trip.
        status["entity_ids"].remove(issuer)
        if "sign_alg" in status:
            sign_alg = status["sign_alg"]
        return self.do_logout(decode(status["name_id"]),
                              status["entity_ids"],
                              status["reason"],
                              status["not_on_or_after"],
                              status["sign"],
                              sign_alg=sign_alg,
                              digest_alg=digest_alg)
handles a Logout response :param response: A response.Response instance :return: 4-tuple of (session_id of the last sent logout request, response message, response headers and message)
def get_publish_path(self, obj):
    """Join the chat type's publish path with the channel's own path.

    Leading slashes on the channel path are stripped so os.path.join
    treats it as relative to the chat type's path.
    """
    channel_path = obj.publish_path.lstrip("/")
    return os.path.join(obj.chat_type.publish_path, channel_path)
publish_path joins the publish_paths for the chat type and the channel.