code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _iter_dimensions(self):
    """Generate a Dimension object for each raw dimension dict."""
    return (
        Dimension(raw.dimension_dict, raw.dimension_type)
        for raw in self._raw_dimensions
    )
Generate Dimension object for each dimension dict.
def build(self):
    """Create the current layer and return the packet string with payload."""
    packet = self.do_build()
    packet += self.build_padding()
    return self.build_done(packet)
Create the current layer :return: string of the packet with the payload
def add_assertions(self, *assertions):
    """Add assertions to ``self.independencies``.

    Each assertion is either an IndependenceAssertion instance, or a
    list/tuple of (variable, independent_of[, given]) used to build one.
    """
    for assertion in assertions:
        if isinstance(assertion, IndependenceAssertion):
            self.independencies.append(assertion)
            continue
        try:
            built = IndependenceAssertion(assertion[0], assertion[1], assertion[2])
        except IndexError:
            # No 'given' component supplied.
            built = IndependenceAssertion(assertion[0], assertion[1])
        self.independencies.append(built)
Adds assertions to independencies. Parameters ---------- assertions: Lists or Tuples Each assertion is a list or tuple of variable, independent_of and given. Examples -------- >>> from pgmpy.independencies import Independencies >>> independencies = Independencies() >>> independencies.add_assertions(['X', 'Y', 'Z']) >>> independencies.add_assertions(['a', ['b', 'c'], 'd'])
def create(cls, tokens:Tokens, max_vocab:int, min_freq:int) -> 'Vocab':
    "Create a vocabulary from a set of `tokens`."
    counts = Counter(tok for doc in tokens for tok in doc)
    itos = [tok for tok, c in counts.most_common(max_vocab) if c >= min_freq]
    # Move the special tokens to the front, preserving their declared order.
    for tok in reversed(defaults.text_spec_tok):
        if tok in itos:
            itos.remove(tok)
        itos.insert(0, tok)
    return cls(itos)
Create a vocabulary from a set of `tokens`.
def create_page(cls, webdriver=None, **kwargs):
    """Instantiate this PageObject via PageFactory, using the default
    managed webdriver when none is supplied."""
    driver = webdriver or WTF_WEBDRIVER_MANAGER.get_driver()
    return PageFactory.create_page(cls, webdriver=driver, **kwargs)
Class method short cut to call PageFactory on itself. Use it to instantiate this PageObject using a webdriver. Args: webdriver (Webdriver): Instance of Selenium Webdriver. Returns: PageObject Raises: InvalidPageError
def __search(self, value, type_attribute):
    """Search every configured MISP connection for `value`.

    Raises EmptySearchtermError when `value` is falsy.
    """
    if not value:
        raise EmptySearchtermError
    results = []
    for idx, connection in enumerate(self.misp_connections):
        misp_response = connection.search(type_attribute=type_attribute, values=value)
        # misp_name may be one shared name or one name per connection.
        name = self.misp_name[idx] if isinstance(self.misp_name, list) else self.misp_name
        results.append({
            'url': connection.root_url,
            'name': name,
            'result': self.__clean(misp_response),
        })
    return results
Search method call wrapper. :param value: value to search for. :type value: str :param type_attribute: attribute types to search for. :type type_attribute: [list, none]
def accuracy_thresh_expand(y_pred:Tensor, y_true:Tensor, thresh:float=0.5, sigmoid:bool=True)->Rank0Tensor:
    "Compute accuracy after expanding `y_true` to the size of `y_pred`."
    if sigmoid:
        y_pred = y_pred.sigmoid()
    target = y_true[:, None].expand_as(y_pred).byte()
    return ((y_pred > thresh) == target).float().mean()
Compute accuracy after expanding `y_true` to the size of `y_pred`.
def devices():
    """Get known BTRFS formatted devices on the system (via ``blkid``)."""
    out = __salt__['cmd.run_all']("blkid -o export")
    salt.utils.fsutils._verify_run(out)
    return salt.utils.fsutils._blkid_output(out['stdout'], fs_type='btrfs')
Get known BTRFS formatted devices on the system. CLI Example: .. code-block:: bash salt '*' btrfs.devices
def is_default(self):
    """Return True only if every contained field, and the container itself,
    is in its default form."""
    return (all(field.is_default() for field in self._fields)
            and super(Container, self).is_default())
Checks if the field is in its default form :return: True if field is in default form
def attrib(self):
    """General XML element attributes for a seismic source, as a dict."""
    return {
        'id': str(self.id),
        'name': str(self.name),
        'tectonicRegion': str(self.trt),
    }
General XML element attributes for a seismic source, as a dict.
def to_json(self):
    """Return a `dict` representation of the resource, including all
    properties (camel-cased) and tags."""
    resource = self.resource
    return {
        'resourceType': resource.resource_type_id,
        'resourceId': self.id,
        'accountId': resource.account_id,
        'account': self.account,
        'location': resource.location,
        'properties': {to_camelcase(p.name): p.value for p in resource.properties},
        'tags': [{'key': t.key, 'value': t.value} for t in resource.tags],
    }
Return a `dict` representation of the resource, including all properties and tags Returns: `dict`
def find_patches(modules, recursive=True):
    """Find all the patches created through decorators in the given
    modules/packages (optionally recursing into subpackages)."""
    patches = []
    all_modules = (
        module
        for package in modules
        for module in _module_iterator(package, recursive=recursive)
    )
    for module in all_modules:
        for _, value in _get_members(module, filter=None):
            decorator_data = get_decorator_data(_get_base(value))
            if decorator_data is not None:
                patches.extend(decorator_data.patches)
    return patches
Find all the patches created through decorators. Parameters ---------- modules : list of module Modules and/or packages to search the patches in. recursive : bool ``True`` to search recursively in subpackages. Returns ------- list of gorilla.Patch Patches found. Raises ------ TypeError The input is not a valid package or module. See Also -------- :func:`patch`, :func:`patches`.
def _parse_geometry(self, geom):
    """Parse a whitespace-delimited geometry string into a Molecule.

    Each line is "symbol atomic_number x y z"; the atomic number column
    is parsed but not used.
    """
    atoms = []
    for i, line in enumerate(geom.splitlines()):
        sym, atno, x, y, z = line.split()
        coords = [float(x), float(y), float(z)]
        atoms.append(Atom(sym, coords, id=i))
    return Molecule(atoms)
Parse a geometry string and return Molecule object from it.
def finish(self):
    """Finish the compressor and return the remaining output.

    Transitions the encoder to a completed state; the compressor cannot
    be used again afterwards.
    """
    chunks = []
    while lib.BrotliEncoderIsFinished(self._encoder) == lib.BROTLI_FALSE:
        chunks.append(self._compress(b'', lib.BROTLI_OPERATION_FINISH))
    return b''.join(chunks)
Finish the compressor. This will emit the remaining output data and transition the compressor to a completed state. The compressor cannot be used again after this point, and must be replaced.
def fill_masked(self, value=-1, copy=True):
    """Fill masked genotype calls with `value`.

    Returns a new array of the same class when `copy` is True; otherwise
    modifies this array in place. The mask is cleared on the result.
    Raises ValueError when no mask is set.
    """
    if self.mask is None:
        raise ValueError('no mask is set')
    data = np.array(self.values, copy=copy)
    data[self.mask, ...] = value
    if copy:
        out = type(self)(data)
        out.is_phased = self.is_phased
    else:
        out = self
    out.mask = None
    return out
Fill masked genotype calls with a given value. Parameters ---------- value : int, optional The fill value. copy : bool, optional If False, modify the array in place. Returns ------- g : GenotypeArray Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]], dtype='i1') >>> mask = [[True, False], [False, True], [False, False]] >>> g.mask = mask >>> g.fill_masked().values array([[[-1, -1], [ 0, 1]], [[ 0, 1], [-1, -1]], [[ 0, 2], [-1, -1]]], dtype=int8)
def _generate_autoscaling_metadata(self, cls, args):
    """Provide special handling for the autoscaling.Metadata object:
    builds the CloudFormation Init (and optional Authentication) blocks."""
    assert isinstance(args, Mapping)
    init_config = self._create_instance(
        cloudformation.InitConfig,
        args['AWS::CloudFormation::Init']['config'])
    init = self._create_instance(
        cloudformation.Init, {'config': init_config})

    auth = None
    if 'AWS::CloudFormation::Authentication' in args:
        auth_blocks = {
            key: self._create_instance(
                cloudformation.AuthenticationBlock,
                args['AWS::CloudFormation::Authentication'][key],
                key)
            for key in args['AWS::CloudFormation::Authentication']
        }
        auth = self._create_instance(
            cloudformation.Authentication, auth_blocks)
    return cls(init, auth)
Provides special handling for the autoscaling.Metadata object
def proxy_global(name, no_expand_macro=False, fname='func', args=()):
    """Used to automatically asrootpy ROOT's thread local variables.

    Returns a property that wraps the ROOT global `name`; when
    `no_expand_macro` is set the global is wrapped directly with a
    `func` attribute returning itself.
    """
    if no_expand_macro:
        @property
        def gSomething_no_func(self):
            glob = self(getattr(ROOT, name))

            def func():
                return glob
            glob.func = func
            return glob
        return gSomething_no_func

    @property
    def gSomething(self):
        obj_func = getattr(getattr(ROOT, name), fname)
        try:
            obj = obj_func(*args)
        except ReferenceError:
            # The underlying ROOT object has been destroyed.
            return None
        return self(obj)
    return gSomething
Used to automatically asrootpy ROOT's thread local variables
def _clamp_value(value, minimum, maximum): if maximum < minimum: raise ValueError if value < minimum: return minimum elif value > maximum: return maximum else: return value
Clamp a value to fit between a minimum and a maximum. * If ``value`` is between ``minimum`` and ``maximum``, return ``value`` * If ``value`` is below ``minimum``, return ``minimum`` * If ``value`` is above ``maximum``, return ``maximum`` Args: value (float or int): The number to clamp minimum (float or int): The lowest allowed return value maximum (float or int): The highest allowed return value Returns: float or int: the clamped value Raises: ValueError: if maximum < minimum Example: >>> _clamp_value(3, 5, 10) 5 >>> _clamp_value(11, 5, 10) 10 >>> _clamp_value(8, 5, 10) 8
def findfile(self, old, new):
    """Return the name of the file to be patched, or None.

    Falls back to stripping Google Code's ``a/``/``b/`` prefixes when
    neither candidate exists as-is.
    """
    for candidate in (old, new):
        if exists(candidate):
            return candidate
    debug("broken patch from Google Code, stripping prefixes..")
    if old.startswith(b'a/') and new.startswith(b'b/'):
        old, new = old[2:], new[2:]
        debug(" %s" % old)
        debug(" %s" % new)
        for candidate in (old, new):
            if exists(candidate):
                return candidate
    return None
return name of file to be patched or None
def write_short_bytes(b):
    """Encode a Kafka short string which contains arbitrary bytes.

    A short string is limited to 32767 bytes by the signed 16-bit length
    prefix; ``None`` encodes as the null marker (length -1).

    :raises TypeError: when `b` is not bytes or None.
    :raises struct.error: when `b` is longer than 32767 bytes.
    """
    if b is None:
        return _NULL_SHORT_STRING
    if not isinstance(b, bytes):
        raise TypeError('{!r} is not bytes'.format(b))
    if len(b) > 32767:
        raise struct.error(len(b))
    return struct.pack('>h', len(b)) + b
Encode a Kafka short string which contains arbitrary bytes. A short string is limited to 32767 bytes in length by the signed 16-bit length prefix. A length prefix of -1 indicates ``null``, represented as ``None`` in Python. :param bytes b: No more than 32767 bytes, or ``None`` for the null encoding. :return: length-prefixed `bytes` :raises: `struct.error` for strings longer than 32767 characters
def _hash_comparison(self):
    """Return a comparison of actual and expected hash values, one
    "Expected"/"Got" group per hash algorithm."""
    def hash_then_or(hash_name):
        # First expected line shows the algorithm name, continuations "or".
        return chain([hash_name], repeat(' or'))

    lines = []
    for hash_name, expecteds in iteritems(self.allowed):
        prefix = hash_then_or(hash_name)
        lines.extend(
            (' Expected %s %s' % (next(prefix), e)) for e in expecteds)
        lines.append(' Got %s\n' % self.gots[hash_name].hexdigest())
        prefix = ' or'
    return '\n'.join(lines)
Return a comparison of actual and expected hash values. Example:: Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde or 123451234512345123451234512345123451234512345 Got bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef
def set_person(self, what, rep):
    """Set a person substitution (``! person`` in RiveScript code).

    :param str what: The original text to replace.
    :param str rep: The replacement text, or ``None`` to delete the
        substitution.
    """
    if rep is None:
        # BUG FIX: the original deleted the entry but then fell through
        # and re-inserted the key with a value of None. Deleting must be
        # terminal. pop() also avoids a separate membership test.
        self._person.pop(what, None)
        return
    self._person[what] = rep
Set a person substitution. Equivalent to ``! person`` in RiveScript code. :param str what: The original text to replace. :param str rep: The text to replace it with. Set this to ``None`` to delete the substitution.
def sample(self, bqm, **kwargs):
    """Sample `bqm` via the child sampler and truncate the result,
    optionally aggregating identical samples first."""
    tkw = self._truncate_kwargs
    sampleset = self.child.sample(bqm, **kwargs)
    if self._aggregate:
        sampleset = sampleset.aggregate()
    return sampleset.truncate(**tkw)
Sample from the problem provided by bqm and truncate output. Args: bqm (:obj:`dimod.BinaryQuadraticModel`): Binary quadratic model to be sampled from. **kwargs: Parameters for the sampling method, specified by the child sampler. Returns: :obj:`dimod.SampleSet`
def get_match_names(match):
    """Gets keyword names for the given match (recursing through paren
    and trailer groups, collecting "as" bindings and non-wildcard vars)."""
    names = []
    if "paren" in match:
        (inner,) = match
        names += get_match_names(inner)
    elif "var" in match:
        (setvar,) = match
        if setvar != wildcard:
            names.append(setvar)
    elif "trailer" in match:
        inner, trailers = match[0], match[1:]
        # Trailers come in (op, arg) pairs.
        for i in range(0, len(trailers), 2):
            op, arg = trailers[i], trailers[i + 1]
            if op == "as":
                names.append(arg)
        names += get_match_names(inner)
    return names
Gets keyword names for the given match.
def length_range(string, minimum, maximum):
    """Require the value's length to be within [minimum, maximum];
    returns `string` unchanged when valid."""
    int_range(len(string), minimum, maximum)
    return string
Requires values' length to be in a certain range. :param string: Value to validate :param minimum: Minimum length to accept :param maximum: Maximum length to accept :type string: str :type minimum: int :type maximum: int
def remove(self, event, subscriber):
    """Remove a subscriber for an event.

    :raises ValueError: if the event has no subscribers, or the
        subscriber is not registered for it.
    """
    try:
        subscribers = self._subscribers[event]
    except KeyError:
        raise ValueError('No subscribers: %r' % event)
    subscribers.remove(subscriber)
Remove a subscriber for an event. :param event: The name of an event. :param subscriber: The subscriber to be removed.
def default_aux_file_paths(self, primary_path):
    """Return default auxiliary-file paths for `primary_path`: the same
    name with each auxiliary extension substituted for the primary one.

    Returns a dict mapping auxiliary file name -> default path.
    """
    base = primary_path[:-len(self.ext)]
    return {name: base + ext for name, ext in self.aux_files.items()}
Get the default paths for auxiliary files relative to the path of the primary file, i.e. the same name as the primary path with a different extension Parameters ---------- primary_path : str Path to the primary file in the fileset Returns ------- aux_paths : dict[str, str] A dictionary of auxiliary file names and default paths
def add_store(source, store, saltenv='base'):
    """Add the given certificate (cached from `source`) into the given
    Windows certificate store via ``certutil.exe``."""
    cert_file = __salt__['cp.cache_file'](source, saltenv)
    # NOTE(review): store/cert_file are interpolated into a shell command;
    # assumes trusted input from the salt master.
    cmd = "certutil.exe -addstore {0} {1}".format(store, cert_file)
    return __salt__['cmd.run'](cmd)
Add the given cert into the given Certificate Store source The source certificate file this can be in the form salt://path/to/file store The certificate store to add the certificate to saltenv The salt environment to use this is ignored if the path is local CLI Example: .. code-block:: bash salt '*' certutil.add_store salt://cert.cer TrustedPublisher
def get_title(self):
    """Get the page title, falling back to the model's verbose plural name."""
    return self.title or self.get_model_class()._meta.verbose_name_plural
Get page title
def union(left, right, distinct=False):
    """Union two collections, optionally deduplicating rows.

    :param left: left collection
    :param right: right collection
    :param distinct: drop duplicate rows when True
    :return: collection
    """
    left, right = _make_different_sources(left, right)
    return UnionCollectionExpr(_lhs=left, _rhs=right, _distinct=distinct)
Union two collections. :param left: left collection :param right: right collection :param distinct: :return: collection :Example: >>> df['name', 'id'].union(df2['id', 'name'])
def _sendMessage(self, msg): if not msg: return msg = self._collapseMsg(msg) self.sendStatus(msg)
Collapse and send msg to the master
def non_decreasing(values):
    """True if each value is >= its predecessor (vacuously true for
    empty or single-element sequences)."""
    return all(earlier <= later for earlier, later in zip(values, values[1:]))
True if values are not decreasing.
def _get_bios_boot_resource(self, data):
    """Get the Boot resource (e.g. BootSources) from BIOS settings.

    :param data: Existing BIOS settings of the server.
    :returns: boot settings.
    :raises IloCommandNotSupportedError: if the resource is missing.
    :raises IloError: on an error from iLO.
    """
    try:
        boot_uri = data['links']['Boot']['href']
    except KeyError:
        raise exception.IloCommandNotSupportedError('Boot resource not found.')
    status, headers, boot_settings = self._rest_get(boot_uri)
    if status != 200:
        raise exception.IloError(self._get_extended_error(boot_settings))
    return boot_settings
Get the Boot resource like BootSources. :param data: Existing Bios settings of the server. :returns: boot settings. :raises: IloCommandNotSupportedError, if resource is not found. :raises: IloError, on an error from iLO.
def run_report_from_console(output_file_name, callback):
    """Run the report from the command line: prompt for input, run
    `callback`, save the result, and open it in a browser."""
    print("The report uses a read-only access to the book.")
    print("Now enter the data or ^Z to continue:")
    result = callback()
    output = save_to_temp(result, output_file_name)
    webbrowser.open(output)
Runs the report from the command line. Receives the book url from the console.
def _parse_key(key): splt = key.split("\\") hive = splt.pop(0) key = '\\'.join(splt) return hive, key
split the hive from the key
def bound_elems(elems):
    """Find the minimal bbox (x0, y0, x1, y1) that contains all elems."""
    return (
        min(e.x0 for e in elems),
        min(e.y0 for e in elems),
        max(e.x1 for e in elems),
        max(e.y1 for e in elems),
    )
Finds the minimal bbox that contains all given elems
def get_or_create_element(self, ns, name):
    """Return the wrapped child element `ns:name` if it already exists,
    otherwise create it, attach it to the DOM, and return its wrapper."""
    matches = self._node.xpath('%s:%s' % (ns, name), namespaces=SLDNode._nsmap)
    if len(matches) == 1:
        return getattr(self, name)
    return self.create_element(ns, name)
Attempt to get the only child element from this SLDNode. If the node does not exist, create the element, attach it to the DOM, and return the class object that wraps the node. @type ns: string @param ns: The namespace of the new element. @type name: string @param name: The name of the new element. @rtype: L{SLDNode} @return: The wrapped node, in the parent's property class. This will always be a descendent of SLDNode.
def method(self, symbol):
    """Symbol decorator: returns a decorator that attaches the decorated
    function to `symbol` as a method.

    BUG FIX: the inner decorator previously returned None, which rebound
    the decorated name to None at the definition site; it now returns
    the function, as a decorator must.
    """
    assert issubclass(symbol, SymbolBase)

    def wrapped(fn):
        setattr(symbol, fn.__name__, fn)
        return fn
    return wrapped
Symbol decorator.
def removeHandler(self, event_name):
    """Remove the handler for the given event by nulling its slot;
    raises ValueError for unknown events."""
    if event_name not in self.handlers:
        raise ValueError('{} is not a valid event'.format(event_name))
    self.handlers[event_name] = None
Remove handler for given event.
def get_gdns_publisher(config, metrics, **kwargs):
    """Factory returning a validated GDNSPublisher client.

    Args:
        config (dict): Google Cloud DNS API configuration.
        metrics (obj): IMetricRelay implementation.
        kwargs (dict): Extra keyword arguments for the publisher.
    """
    builder = gdns_publisher.GDNSPublisherBuilder(config, metrics, **kwargs)
    return builder.build_publisher()
Get a GDNSPublisher client. A factory function that validates configuration and returns a publisher client (:interface:`gordon.interfaces.IMessageHandler`) provider. Args: config (dict): Google Cloud DNS API related configuration. metrics (obj): :interface:`IMetricRelay` implementation. kwargs (dict): Additional keyword arguments to pass to the publisher. Returns: A :class:`GDNSPublisher` instance.
def _decode_config(conf_str):
    """Decode a "TFTF..." string into a configuration dict keyed by
    settings._ALLOWED_MERGES ('T'/'t' -> True, anything else False)."""
    flags = [ch.upper() == "T" for ch in conf_str.strip()]
    return dict(zip(settings._ALLOWED_MERGES, flags))
Decode string to configuration dict. Only values defined in settings._ALLOWED_MERGES can be redefined.
def was_installed_by_pip(pkg):
    """Check whether `pkg` was installed by pip.

    Used to suppress the upgrade message when pip itself was installed
    by a system package manager (e.g. dnf on Fedora).
    """
    try:
        dist = pkg_resources.get_distribution(pkg)
        return (dist.has_metadata('INSTALLER')
                and 'pip' in dist.get_metadata_lines('INSTALLER'))
    except pkg_resources.DistributionNotFound:
        return False
Checks whether pkg was installed by pip This is used not to display the upgrade message when pip is in fact installed by system package manager, such as dnf on Fedora.
def cd(path):
    """Context-manager body: create `path` if needed, chdir into it, and
    restore the previous working directory on exit.

    The OSError from makedirs is swallowed deliberately (path may
    already exist) — NOTE(review): this also hides permission errors.
    """
    previous = os.getcwd()
    try:
        os.makedirs(path)
    except OSError:
        pass
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)
Creates the path if it doesn't exist, changes the working directory into it, and restores the previous working directory on exit.
def _fft_convolve_numpy(data, h, plan = None, kernel_is_fft = False, kernel_is_fftshifted = False):
    """Convolve via OpenCL FFT for numpy arrays.

    `data` and `h` must have the same shape; the kernel is fftshifted
    unless the caller says it already is.
    """
    if data.shape != h.shape:
        raise ValueError("data and kernel must have same size! %s vs %s "%(str(data.shape),str(h.shape)))

    data_g = OCLArray.from_array(data.astype(np.complex64))
    if not kernel_is_fftshifted:
        h = np.fft.fftshift(h)
    h_g = OCLArray.from_array(h.astype(np.complex64))
    res_g = OCLArray.empty_like(data_g)

    _fft_convolve_gpu(data_g, h_g, res_g=res_g, plan=plan,
                      kernel_is_fft=kernel_is_fft)
    res = abs(res_g.get())

    # Release GPU buffers promptly.
    del data_g, h_g, res_g
    return res
convolving via opencl fft for numpy arrays data and h must have the same size
def run(main, argv=None, flags_parser=parse_flags_with_usage):
    """Begin executing the program: parse flags, run init callbacks, call
    `main(argv)`, and route errors through the registered handlers.

    UsageError from `main` prints usage and exits with its exitcode; any
    other exception is passed to the exception handlers and re-raised
    (optionally dropping into pdb when --pdb_post_mortem is set).
    """
    try:
        args = _run_init(sys.argv if argv is None else argv, flags_parser)
        while _init_callbacks:
            callback = _init_callbacks.popleft()
            callback()
        try:
            _run_main(main, args)
        except UsageError as error:
            usage(shorthelp=True, detailed_error=error, exitcode=error.exitcode)
        except:
            if FLAGS.pdb_post_mortem:
                traceback.print_exc()
                pdb.post_mortem()
            raise
    except Exception as e:
        _call_exception_handlers(e)
        raise
Begins executing the program. Args: main: The main function to execute. It takes an single argument "argv", which is a list of command line arguments with parsed flags removed. If it returns an integer, it is used as the process's exit code. argv: A non-empty list of the command line arguments including program name, sys.argv is used if None. flags_parser: Callable[[List[Text]], Any], the function used to parse flags. The return value of this function is passed to `main` untouched. It must guarantee FLAGS is parsed after this function is called. - Parses command line flags with the flag module. - If there are any errors, prints usage(). - Calls main() with the remaining arguments. - If main() raises a UsageError, prints usage and the error message.
def _send_splunk(event, index_override=None, sourcetype_override=None):
    """Send the results to Splunk via the HTTP Event Collector.

    Requires the collector running on the configured indexer (port 8088,
    Splunk Enterprise 6.3+).

    BUG FIX: a sourcetype_override was previously stored under the
    "index" payload key, clobbering the index and never setting the
    sourcetype.
    """
    opts = _get_options()
    log.info(str('Options: %s'), salt.utils.json.dumps(opts))
    http_event_collector_key = opts['token']
    http_event_collector_host = opts['indexer']
    splunk_event = http_event_collector(http_event_collector_key,
                                        http_event_collector_host)
    payload = {
        "index": opts['index'] if index_override is None else index_override,
        "sourcetype": (opts['sourcetype'] if sourcetype_override is None
                       else sourcetype_override),
        "event": event,
    }
    log.info(str('Payload: %s'), salt.utils.json.dumps(payload))
    splunk_event.sendEvent(payload)
    return True
Send the results to Splunk. Requires the Splunk HTTP Event Collector running on port 8088. This is available on Splunk Enterprise version 6.3 or higher.
def source_statement(self):
    """Generate a source statement equivalent to this import."""
    if self._has_alias():
        return 'import %s as %s' % (self.fullName, self.name)
    return 'import %s' % self.fullName
Generate a source statement equivalent to the import.
def disassemble_capstone(self, target_id=0, address=None, count=None):
    """Disassemble `count` instructions with capstone, starting at
    `address` (defaults to the current pc)."""
    target = self._target(target_id)
    if not address:
        pc_name, address = self.pc()
    # 16 bytes per instruction is a safe upper bound for any arch.
    mem = self.memory(address, count * 16, target_id=target_id)
    md = capstone.Cs(*self.cs_archs[target['arch']])
    lines = []
    for idx, insn in enumerate(md.disasm(mem, address)):
        if idx >= count:
            break
        lines.append("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
    return '\n'.join(lines)
Disassemble with capstone.
def _handle_event(self, sid, namespace, id, data):
    """Handle an incoming client event, dispatching either on a
    background task or inline depending on async_handlers."""
    namespace = namespace or '/'
    self.logger.info('received event "%s" from %s [%s]', data[0], sid,
                     namespace)
    if self.async_handlers:
        # NOTE: self is passed explicitly as the first handler argument.
        self.start_background_task(self._handle_event_internal, self, sid,
                                   data, namespace, id)
    else:
        self._handle_event_internal(self, sid, data, namespace, id)
Handle an incoming client event.
def cleanup(self):
    """Release resources used during memory capture: close the socket
    and output file, and finalize the progress bar."""
    if self.sock is not None:
        self.sock.close()
    if self.outfile is not None:
        self.outfile.close()
    if self.bar is not None:
        self.update_progress(complete=True)
Release resources used during memory capture
def remove_not_allowed_chars(savepath):
    """Replace invalid filepath characters in `savepath` with underscores.

    The drive portion (if any) is left untouched.
    """
    drive, tail = os.path.splitdrive(savepath)
    return drive + re.sub(r'[<>:"|?*]', '_', tail)
Removes invalid filepath characters from the savepath. :param str savepath: the savepath to work on :return str: the savepath without invalid filepath characters
def probe_enable(cls, resource):
    """Activate a probe on a webaccelerator and wait for the operation."""
    oper = cls.call('hosting.rproxy.probe.enable', cls.usable_id(resource))
    cls.echo('Activating probe on %s' % resource)
    cls.display_progress(oper)
    cls.echo('The probe have been activated')
    return oper
Activate a probe on a webaccelerator
def fg(color):
    """Foreground color formatter factory: returns a function wrapping a
    message in the named colorama foreground color and a reset code."""
    ansi_pair = [getattr(colorama.Fore, color.upper()), colorama.Fore.RESET]
    return lambda msg: msg.join(ansi_pair)
Foreground color formatter function factory. Each function casts from a unicode string to a colored bytestring with the respective foreground color and foreground reset ANSI escape codes. You can also use the ``fg.color`` or ``fg[color]`` directly as attributes/items. The colors are the names of the ``colorama.Fore`` attributes (case insensitive). For more information, see: https://pypi.python.org/pypi/colorama https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
def reqPnL(self, account: str, modelCode: str = '') -> PnL:
    """Start a subscription for profit-and-loss events for the given
    account (and optional model); returns a live-updated PnL object."""
    key = (account, modelCode)
    assert key not in self.wrapper.pnlKey2ReqId
    reqId = self.client.getReqId()
    self.wrapper.pnlKey2ReqId[key] = reqId
    pnl = PnL(account, modelCode)
    self.wrapper.pnls[reqId] = pnl
    self.client.reqPnL(reqId, account, modelCode)
    return pnl
Start a subscription for profit and loss events. Returns a :class:`.PnL` object that is kept live updated. The result can also be queried from :meth:`.pnl`. https://interactivebrokers.github.io/tws-api/pnl.html Args: account: Subscribe to this account. modelCode: If specified, filter for this account model.
async def _retrieve_messages_around_strategy(self, retrieve): if self.around: around = self.around.id if self.around else None data = await self.logs_from(self.channel.id, retrieve, around=around) self.around = None return data return []
Retrieve messages using around parameter.
def pdf(self, mu):
    """Evaluate the Cauchy prior density at latent variable `mu`,
    using this prior's location and scale."""
    return ss.cauchy.pdf(mu, self.loc0, self.scale0)
PDF for Cauchy prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - p(mu)
def get_learner_stats(grad_info):
    """Return optimization stats reported from the policy graph.

    A top-level LEARNER_STATS_KEY wins; otherwise per-agent sub-dicts
    containing it are collected.

    FIX: ``type(v) is dict`` rejected dict subclasses (OrderedDict,
    defaultdict); use isinstance instead.
    """
    if LEARNER_STATS_KEY in grad_info:
        return grad_info[LEARNER_STATS_KEY]
    multiagent_stats = {}
    for k, v in grad_info.items():
        if isinstance(v, dict) and LEARNER_STATS_KEY in v:
            multiagent_stats[k] = v[LEARNER_STATS_KEY]
    return multiagent_stats
Return optimization stats reported from the policy graph. Example: >>> grad_info = evaluator.learn_on_batch(samples) >>> print(get_stats(grad_info)) {"vf_loss": ..., "policy_loss": ...}
def _format_value(self, val): name = self.name + ":" if not self.multiline or "\n" not in val: val = u"{0} {1}".format(name.ljust(self._text_prefix_len), val) else: spacer = "\n" + " " * (self._text_prefix_len + 1) val = u"{0}{1}{2}".format(name, spacer, spacer.join(val.split("\n"))) return val
formats a value to be good for textmode printing val must be unicode
def make_graph(node, inputs):
    """Create an ONNX GraphProto wrapping a single `node`, promoting the
    input named 'W' to an initializer tensor built from `inputs`."""
    initializer = []
    tensor_input_info = []
    tensor_output_info = []

    for index in range(len(node.input)):
        tensor_input_info.append(
            helper.make_tensor_value_info(str(node.input[index]),
                                          TensorProto.FLOAT, [1]))
        if node.input[index] == 'W':
            dim = inputs[index].shape
            param_tensor = helper.make_tensor(
                name=node.input[index],
                data_type=TensorProto.FLOAT,
                dims=dim,
                vals=inputs[index].flatten())
            initializer.append(param_tensor)

    for index in range(len(node.output)):
        tensor_output_info.append(
            helper.make_tensor_value_info(str(node.output[index]),
                                          TensorProto.FLOAT, [1]))

    return helper.make_graph([node], "test", tensor_input_info,
                             tensor_output_info, initializer=initializer)
Created ONNX GraphProto from node
def _simulate_coef_from_bootstraps( self, n_draws, coef_bootstraps, cov_bootstraps): random_bootstrap_indices = np.random.choice( np.arange(len(coef_bootstraps)), size=n_draws, replace=True) bootstrap_index_to_draw_indices = defaultdict(list) for draw_index, bootstrap_index in enumerate(random_bootstrap_indices): bootstrap_index_to_draw_indices[bootstrap_index].append(draw_index) coef_draws = np.empty((n_draws, len(self.coef_))) for bootstrap, draw_indices in bootstrap_index_to_draw_indices.items(): coef_draws[draw_indices] = np.random.multivariate_normal( coef_bootstraps[bootstrap], cov_bootstraps[bootstrap], size=len(draw_indices)) return coef_draws
Simulate coefficients using bootstrap samples.
def data_from_stream(self, stream):
    """Create a data element by parsing a representation from `stream`."""
    parser = self._make_representation_parser(stream, self.resource_class,
                                              self._mapping)
    return parser.run()
Creates a data element reading a representation from the given stream. :returns: object implementing :class:`everest.representers.interfaces.IExplicitDataElement`
def find_side(ls, side):
    """Given a rectangular shapely LineString, return the LineString for
    the requested side ('left', 'right', 'bottom', or 'top')."""
    minx, miny, maxx, maxy = ls.bounds
    points = {
        'left': [(minx, miny), (minx, maxy)],
        'right': [(maxx, miny), (maxx, maxy)],
        'bottom': [(minx, miny), (maxx, miny)],
        'top': [(minx, maxy), (maxx, maxy)],
    }
    return sgeom.LineString(points[side])
Given a shapely LineString which is assumed to be rectangular, return the line corresponding to a given side of the rectangle.
def _find_impl(cls, role, interface):
    """Find the relation implementation for a role/interface pair, or
    None when no matching module exists."""
    module = _relation_module(role, interface)
    return cls._find_subclass(module) if module else None
Find relation implementation based on its role and interface.
def is_eighth_sponsor(self):
    """Whether this user is associated with an EighthSponsor (and should
    therefore see activity-sponsoring information)."""
    from ..eighth.models import EighthSponsor
    return EighthSponsor.objects.filter(user=self).exists()
Determine whether the given user is associated with an. :class:`intranet.apps.eighth.models.EighthSponsor` and, therefore, should view activity sponsoring information.
def _precheck(self, curtailment_timeseries, feedin_df, curtailment_key): if not feedin_df.empty: feedin_selected_sum = feedin_df.sum(axis=1) diff = feedin_selected_sum - curtailment_timeseries diff[diff.between(-1, 0)] = 0 if not (diff >= 0).all(): bad_time_steps = [_ for _ in diff.index if diff[_] < 0] message = 'Curtailment demand exceeds total feed-in in time ' \ 'steps {}.'.format(bad_time_steps) logging.error(message) raise ValueError(message) else: bad_time_steps = [_ for _ in curtailment_timeseries.index if curtailment_timeseries[_] > 0] if bad_time_steps: message = 'Curtailment given for time steps {} but there ' \ 'are no generators to meet the curtailment target ' \ 'for {}.'.format(bad_time_steps, curtailment_key) logging.error(message) raise ValueError(message)
Raises an error if the curtailment at any time step exceeds the total feed-in of all generators curtailment can be distributed among at that time. Parameters ----------- curtailment_timeseries : :pandas:`pandas.Series<series>` Curtailment time series in kW for the technology (and weather cell) specified in `curtailment_key`. feedin_df : :pandas:`pandas.Series<series>` Feed-in time series in kW for all generators of type (and in weather cell) specified in `curtailment_key`. curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str` Technology (and weather cell) curtailment is given for.
def round_sig(x, sig):
    """Round `x` to `sig` significant figures.

    FIX: the original raised a math-domain ValueError for x == 0 because
    log10(0) is undefined; zero is now returned unchanged (it has no
    significant figures to round).
    """
    if x == 0:
        return 0
    return round(x, sig - int(floor(log10(abs(x)))) - 1)
Round the number to the specified number of significant figures
def create_link(id_link, post_data):
    """Add a record in link.

    Returns False when a link with this uid already exists, otherwise
    creates the record and returns the uid.

    FIX: the bare ``except:`` around the order parse swallowed every
    exception (including KeyboardInterrupt/SystemExit); it is narrowed
    to the exceptions int()/indexing can actually raise.
    """
    if MLink.get_by_uid(id_link):
        return False
    try:
        the_order = int(post_data['order'])
    except (KeyError, ValueError, TypeError):
        # Missing or non-numeric order sorts to the end.
        the_order = 999
    TabLink.create(
        name=post_data['name'],
        link=post_data['link'],
        order=the_order,
        logo=post_data.get('logo', ''),
        uid=id_link,
    )
    return id_link
Add record in link.
def y_select_cb(self, w, index):
    """Callback to set the Y-axis column from a widget selection and
    re-plot; out-of-range indices are logged, not raised."""
    try:
        self.y_col = self.cols[index]
    except IndexError as exc:
        self.logger.error(str(exc))
    else:
        self.plot_two_columns(reset_ylimits=True)
Callback to set Y-axis column.
def read(self, filehandle):
    """Read JSON from `filehandle` and import it via self.__import,
    forwarding any configured json.load keyword arguments."""
    decoded = json.load(filehandle, **self.kwargs)
    return self.__import(decoded)
Read JSON from `filehandle`.
def group_id(self, value):
    """Setter for the unsubscribe group: raw ids are wrapped in GroupId,
    existing GroupId instances are stored as-is."""
    self._group_id = value if isinstance(value, GroupId) else GroupId(value)
The unsubscribe group to associate with this email. :param value: ID of an unsubscribe group :type value: GroupId, int, required
def to_array(self):
    """Serialize this Audio to a dictionary; optional fields are only
    included when set.

    :rtype: dict
    """
    array = super(Audio, self).to_array()
    array['file_id'] = u(self.file_id)
    array['duration'] = int(self.duration)
    if self.performer is not None:
        array['performer'] = u(self.performer)
    if self.title is not None:
        array['title'] = u(self.title)
    if self.mime_type is not None:
        array['mime_type'] = u(self.mime_type)
    if self.file_size is not None:
        array['file_size'] = int(self.file_size)
    if self.thumb is not None:
        array['thumb'] = self.thumb.to_array()
    return array
Serializes this Audio to a dictionary. :return: dictionary representation of this object. :rtype: dict
def _create_minimum_needs_action(self):
    """Create and register the toolbar action for the minimum needs
    calculator dialog."""
    icon = resources_path('img', 'icons', 'show-minimum-needs.svg')
    self.action_minimum_needs = QAction(
        QIcon(icon),
        self.tr('Minimum Needs Calculator'),
        self.iface.mainWindow())
    tip = self.tr('Open InaSAFE minimum needs calculator')
    self.action_minimum_needs.setStatusTip(tip)
    self.action_minimum_needs.setWhatsThis(tip)
    self.action_minimum_needs.triggered.connect(self.show_minimum_needs)
    self.add_action(self.action_minimum_needs,
                    add_to_toolbar=self.full_toolbar)
Create action for minimum needs dialog.
def visualize(tree, max_level=100, node_width=10, left_padding=5):
    """Print the tree to stdout, one level per row, centering each node
    in a width proportional to its level."""
    height = min(max_level, tree.height() - 1)
    max_width = pow(2, height)

    per_level = 1
    in_level = 0
    level = 0
    for node in level_order(tree, include_all=True):
        if in_level == 0:
            print()
            print()
            print(' ' * left_padding, end=' ')
        width = int(max_width * node_width / per_level)
        node_str = (str(node.data) if node else '').center(width)
        print(node_str, end=' ')
        in_level += 1
        if in_level == per_level:
            # Finished this level: the next has twice as many slots.
            in_level = 0
            per_level *= 2
            level += 1
            if level > height:
                break
    print()
    print()
Prints the tree to stdout
def get_by(cls, field, value):
    """Retrieve an instance of this model via a defined secondary index;
    returns None when no record matches."""
    redis = cls.get_redis()
    index_key = cls.cls_key() + ':index_' + field
    record_id = redis.hget(index_key, value)
    if record_id:
        return cls.get(debyte_string(record_id))
    return None
Tries to retrieve an isinstance of this model from the database given a value for a defined index. Return None in case of failure
def from_file(filename=None, io='auto', prefix_dir=None, omit_facets=False):
    """Read a mesh from a file.

    Parameters
    ----------
    filename : string or function or MeshIO instance or Mesh instance
        The file to read; a Mesh instance is returned unchanged.
    io : *MeshIO instance
        Passing a *MeshIO instance has precedence over filename.
    prefix_dir : str
        If not None, the filename is relative to that directory.
    omit_facets : bool
        If True, do not read cells of lower dimension than the space
        dimension (only some MeshIO subclasses support this).

    FIX: ``time.clock()`` was removed in Python 3.8; use the documented
    replacement ``time.perf_counter()`` for interval timing.
    """
    if isinstance(filename, Mesh):
        return filename

    if io == 'auto':
        if filename is None:
            output('filename or io must be specified!')
            raise ValueError
        io = MeshIO.any_from_filename(filename, prefix_dir=prefix_dir)

    output('reading mesh (%s)...' % (io.filename))
    tt = time.perf_counter()

    trunk = io.get_filename_trunk()
    mesh = Mesh(trunk)
    mesh = io.read(mesh, omit_facets=omit_facets)

    output('...done in %.2f s' % (time.perf_counter() - tt))

    mesh._set_shape_info()
    return mesh
Read a mesh from a file. Parameters ---------- filename : string or function or MeshIO instance or Mesh instance The name of file to read the mesh from. For convenience, a mesh creation function or a MeshIO instance or directly a Mesh instance can be passed in place of the file name. io : *MeshIO instance Passing *MeshIO instance has precedence over filename. prefix_dir : str If not None, the filename is relative to that directory. omit_facets : bool If True, do not read cells of lower dimension than the space dimension (faces and/or edges). Only some MeshIO subclasses support this!
def gsignal(name, *args, **kwargs):
    """Add a GObject signal to the class currently being defined.

    Must be called from inside a class body: it reaches into the caller's
    frame and amends the ``__gsignals__`` dict that GObject reads at
    class-creation time.

    :param name: name of the signal
    :param args: types of the signal parameters; if the first one is the
        string 'override', the signal overrides one in the parent GObject
        instead of declaring a new one.
    :param kwargs: ``retval`` (return type) and/or ``flags``
        (a gobject.SIGNAL_* combination).
    :raises TypeError: if ``retval`` is given with flags other than
        gobject.SIGNAL_RUN_LAST.
    """
    # Grab the caller's frame (the class body) to reach its locals.
    frame = sys._getframe(1)
    try:
        locals = frame.f_locals
    finally:
        # Drop the frame reference promptly to avoid reference cycles.
        del frame
    dict = locals.setdefault('__gsignals__', {})
    if args and args[0] == 'override':
        dict[name] = 'override'
    else:
        retval = kwargs.get('retval', None)
        # A signal with a return value must run its handler last so the
        # handler's return value is the one reported; default accordingly.
        if retval is None:
            default_flags = gobject.SIGNAL_RUN_FIRST
        else:
            default_flags = gobject.SIGNAL_RUN_LAST
        flags = kwargs.get('flags', default_flags)
        if retval is not None and flags != gobject.SIGNAL_RUN_LAST:
            raise TypeError(
                "You cannot use a return value without setting flags to "
                "gobject.SIGNAL_RUN_LAST")
        # (flags, return type, parameter types) is the __gsignals__ format.
        dict[name] = (flags, retval, args)
Add a GObject signal to the current object. It currently supports the following types: - str, int, float, long, object, enum :param name: name of the signal :param args: types for signal parameters, if the first one is a string 'override', the signal will be overridden and must therefore exist in the parent GObject. .. note:: flags: A combination of; - gobject.SIGNAL_RUN_FIRST - gobject.SIGNAL_RUN_LAST - gobject.SIGNAL_RUN_CLEANUP - gobject.SIGNAL_NO_RECURSE - gobject.SIGNAL_DETAILED - gobject.SIGNAL_ACTION - gobject.SIGNAL_NO_HOOKS
def fetch_timeline_history_files(self, max_timeline):
    """Fetch every timeline history file from the server, unconditionally.

    Walks timelines from *max_timeline* down to 2, requests each history
    file over the replication connection and queues its contents for
    compression/upload. History files are tiny, so re-fetching ones we
    may already have is harmless.
    """
    for timeline in range(max_timeline, 1, -1):
        self.c.execute("TIMELINE_HISTORY {}".format(timeline))
        row = self.c.fetchone()
        history_filename = row[0]
        history_data = row[1].tobytes()
        self.log.debug("Received timeline history: %s for timeline %r",
                       history_filename, timeline)
        # Hand the in-memory file to the compression pipeline.
        self.compression_queue.put({
            "type": "CLOSE_WRITE",
            "compress_to_memory": True,
            "delete_file_after_compression": False,
            "input_data": BytesIO(history_data),
            "full_path": history_filename,
            "site": self.site,
        })
Copy all timeline history files found on the server without checking if we have them or not. The history files are very small so reuploading them should not matter.
def _sort_by_unique_fields(model, model_objs, unique_fields):
    """Sort *model_objs* deterministically by their unique-field DB values.

    A stable, consistent ordering greatly reduces the chance of deadlock
    when concurrent upserts touch the same rows.
    """
    fields = [
        f for f in model._meta.fields if f.attname in unique_fields
    ]

    def sort_key(obj):
        # Compare by the values exactly as they would be saved to the DB.
        return tuple(
            f.get_db_prep_save(getattr(obj, f.attname), connection)
            for f in fields
        )

    return sorted(model_objs, key=sort_key)
Sort a list of models by their unique fields. Sorting models in an upsert greatly reduces the chances of deadlock when doing concurrent upserts
def refresh_products(self, **kwargs):
    """Refresh the cached info of products returned by product_get.

    Calls ``product_get`` with the given arguments. Each returned product
    is merged into the matching cache entry (matched by id or by name);
    products with no cached counterpart are appended to the cache.
    """
    for fresh in self.product_get(**kwargs):
        # Iterate over a copy so appends elsewhere cannot interfere.
        for cached in self._cache.products[:]:
            same_id = cached.get("id", -1) == fresh.get("id", -2)
            same_name = cached.get("name", -1) == fresh.get("name", -2)
            if same_id or same_name:
                _nested_update(cached, fresh)
                break
        else:
            # No cache entry matched: this is a new product.
            self._cache.products.append(fresh)
Refresh a product's cached info. Basically calls product_get with the passed arguments, and tries to intelligently update our product cache. For example, if we already have cached info for product=foo, and you pass in names=["bar", "baz"], the new cache will have info for products foo, bar, baz. Individual product fields are also updated.
def exp(x, context=None):
    """Return the exponential of x."""
    arg = BigFloat._implicit_convert(x)
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_exp,
        (arg,),
        context,
    )
Return the exponential of x.
def needs_reboot():
    """Determine if Windows needs a reboot to finish pending updates.

    Queries the ``Microsoft.Update.SystemInfo`` COM object.

    Returns:
        bool: True if the system requires a reboot, False if not
    """
    # COM must be initialized on this thread before Dispatch can be used.
    with salt.utils.winapi.Com():
        obj_sys = win32com.client.Dispatch('Microsoft.Update.SystemInfo')
        # RebootRequired is a COM value; is_true normalizes it to a bool.
        return salt.utils.data.is_true(obj_sys.RebootRequired)
Determines if the system needs to be rebooted. Returns: bool: True if the system requires a reboot, False if not CLI Examples: .. code-block:: bash import salt.utils.win_update salt.utils.win_update.needs_reboot()
def delete(cont, path=None, profile=None):
    """Delete a container, or delete an object from a container.

    CLI Example to delete a container::

        salt myminion swift.delete mycontainer

    CLI Example to delete an object from a container::

        salt myminion swift.delete mycontainer remoteobject
    """
    swift_conn = _auth(profile)
    if path is not None:
        return swift_conn.delete_object(cont, path)
    return swift_conn.delete_container(cont)
Delete a container, or delete an object from a container. CLI Example to delete a container:: salt myminion swift.delete mycontainer CLI Example to delete an object from a container:: salt myminion swift.delete mycontainer remoteobject
def migrate(*argv) -> bool:
    """Run Django's ``migrate`` management command.

    :param argv: extra arguments forwarded to the migrate command.
    :return: always ``True``
    """
    wf('Applying migrations... ', False)
    command = ['./manage.py', 'migrate', *argv]
    execute_from_command_line(command)
    wf('[+]\n')
    return True
Runs Django migrate command. :return: always ``True``
def load(self, optional_cfg_files=None):
    """Load the configuration from the default location or config_dir.

    Layering order: built-in defaults, then CONFIG_INI, then each
    optional INI file (when present), then the Python config script.
    The namespace is validated after the INI pass and again after the
    script, and registered validator callbacks run last.

    :param optional_cfg_files: extra INI files, absolute or relative to
        ``config_dir``, merged over the main configuration.
    :raises RuntimeError: if called a second time on the same instance.
    :raises error.UserError: if an INI file cannot be parsed.
    """
    optional_cfg_files = optional_cfg_files or []
    if self._loaded:
        raise RuntimeError("INTERNAL ERROR: Attempt to load configuration twice!")
    try:
        namespace = {}
        self._set_defaults(namespace, optional_cfg_files)
        self._load_ini(namespace, os.path.join(self.config_dir, self.CONFIG_INI))
        for cfg_file in optional_cfg_files:
            if not os.path.isabs(cfg_file):
                cfg_file = os.path.join(self.config_dir, cfg_file)
            # Optional files are skipped silently when absent.
            if os.path.exists(cfg_file):
                self._load_ini(namespace, cfg_file)
        self._validate_namespace(namespace)
        # "config_script" is defined by the INI pass; run it for dynamic settings.
        self._load_py(namespace, namespace["config_script"])
        self._validate_namespace(namespace)
        for callback in namespace["config_validator_callbacks"]:
            callback()
    except ConfigParser.ParsingError as exc:
        # Surface INI syntax errors as a user-facing error.
        raise error.UserError(exc)
    self._loaded = True
Actually load the configuration from either the default location or the given directory.
def clause_annotations(self):
    """The list of clause annotations in the ``words`` layer.

    Runs the clause annotation tagger first when it has not been
    applied yet; words lacking an annotation yield None.
    """
    if not self.is_tagged(CLAUSE_ANNOTATION):
        self.tag_clause_annotations()
    words = self[WORDS]
    return [w.get(CLAUSE_ANNOTATION, None) for w in words]
The list of clause annotations in ``words`` layer.
def bridge_to_vlan(br):
    """Returns the VLAN ID of a bridge.

    Args:
        br: A string - bridge name

    Returns:
        VLAN ID of the bridge (0 when the bridge is not a fake bridge),
        or False when the bridge does not exist.

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.bridge_to_vlan br0
    """
    result = __salt__['cmd.run_all']('ovs-vsctl br-to-vlan {0}'.format(br))
    if result['retcode'] != 0:
        return False
    return int(result['stdout'])
Returns the VLAN ID of a bridge. Args: br: A string - bridge name Returns: VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake bridge. If the bridge does not exist, False is returned. CLI Example: .. code-block:: bash salt '*' openvswitch.bridge_to_vlan br0
def check_links_status(self, fail_running=False, fail_pending=False):
    """Summarize the job status of every `Link` run from this `Chain`.

    Parameters
    ----------
    fail_running : `bool`
        If True, consider running jobs as failed
    fail_pending : `bool`
        If True, consider pending jobs as failed

    Returns
    -------
    status : `JobStatus`
        Flag summarizing the status of all the jobs.
    """
    counts = JobStatusVector()
    for link in self._links.values():
        full_key = JobDetails.make_fullkey(link.full_linkname)
        link_status = link.check_job_status(full_key,
                                            fail_running=fail_running,
                                            fail_pending=fail_pending)
        counts[link_status] += 1
    return counts.get_status()
Check the status of all the jobs run from the `Link` objects in this `Chain` and return a status flag that summarizes that. Parameters ---------- fail_running : `bool` If True, consider running jobs as failed fail_pending : `bool` If True, consider pending jobs as failed Returns ------- status : `JobStatus` Job status flag that summarizes the status of all the jobs.
def set_thumbnail_size(self, width, height):
    """Set thumbnail image size for the current and all subsequent pages.

    Setting a width or height of 0 disables thumbnails for the current
    and subsequent pages.

    :param width: thumbnail width.
    :param height: thumbnail height.

    *New in cairo 1.16.*
    *New in cairocffi 0.9.*
    """
    # Thin FFI wrapper; self._pointer is the underlying cairo surface.
    cairo.cairo_pdf_surface_set_thumbnail_size(
        self._pointer, width, height)
Set thumbnail image size for the current and all subsequent pages. Setting a width or height of 0 disables thumbnails for the current and subsequent pages. :param width: thumbnail width. :param height: thumbnail height. *New in cairo 1.16.* *New in cairocffi 0.9.*
def resync_package(ctx, opts, owner, repo, slug, skip_errors):
    """Resynchronise a package.

    Prints a progress message, calls the resync API endpoint (with a
    spinner and API-exception handling) and prints OK on success.
    """
    # Fixed user-visible typo: "Resynchonising" -> "Resynchronising".
    click.echo(
        "Resynchronising the %(slug)s package ... "
        % {"slug": click.style(slug, bold=True)},
        nl=False,
    )

    context_msg = "Failed to resynchronise package!"
    with handle_api_exceptions(
        ctx, opts=opts, context_msg=context_msg, reraise_on_error=skip_errors
    ):
        with maybe_spinner(opts):
            api_resync_package(owner=owner, repo=repo, identifier=slug)

    click.secho("OK", fg="green")
Resynchronise a package.
def origin(self):
    """Get image origin

    Returns
    -------
    tuple
    """
    # Resolve the library getter matching this image's pixel type suffix.
    getter = utils.get_lib_fn('getOrigin%s' % self._libsuffix)
    return getter(self.pointer)
Get image origin Returns ------- tuple
def set_version(old_version, new_version):
    """Write *new_version* over *old_version* in VERSION_FILE.

    In debug mode only logs the intended change without touching the file.

    :param old_version: version string currently present in the file.
    :param new_version: replacement version string.
    :return: True on success, False when VERSION_FILE does not exist.
        (Previously returned None outside debug mode; now consistent.)
    """
    try:
        if APISettings.DEBUG:
            Shell.debug('* ' + old_version + ' --> ' + new_version)
            return True
        # fileinput with inplace=True redirects stdout into the file,
        # so print() rewrites each line in place.
        for line in fileinput.input(os.path.abspath(APISettings.VERSION_FILE),
                                    inplace=True):
            print(line.replace(old_version, new_version), end='')
        Shell.success('* ' + old_version + ' --> ' + new_version)
        return True
    except FileNotFoundError:
        Shell.warn('File not found!')
        return False
Write new version into VERSION_FILE
def wrap_lons(lons, base, period):
    """Wrap longitude values into the range between base and base+period."""
    values = lons.astype(np.float64)
    # Shift by two periods first so values slightly below base stay positive
    # before the modulo, then shift back into [base, base + period).
    shifted = values - base + period * 2
    return shifted % period + base
Wrap longitude values into the range between base and base+period.
def _help():
    """Print all available tasks and descriptions. (Python 2 syntax.)"""
    # Sort by namespace then name; tasks without a namespace sort first
    # via the '000' placeholder.
    for task in sorted(TASKS, key=lambda x: (x.ns or '000') + x.name):
        # Single-character markers flag the special roles of a task.
        tags = ''
        if task is DEFAULT:
            tags += '*'
        if task is SETUP:
            tags += '+'
        if task is TEARDOWN:
            tags += '-'
        print LOCALE['help_command'].format(task, tags, task.help)
        # Extra detail lines, each only when the task defines them.
        if task.aliases:
            print LOCALE['help_aliases'].format(task.aliasstr())
        if task.reqs:
            print LOCALE['help_reqs'].format(task.reqstr())
        if task.gens:
            print LOCALE['help_gens'].format(task.gens)
        if task.args or task.defaults:
            print LOCALE['help_args'].format(task.ns + task.name, task.kwargstr(), task.argstr())
Print all available tasks and descriptions.
def inline(self) -> str:
    """Return the single-line string form "index:param param ...".

    :return: inline string representation of the instance
    """
    joined_params = ' '.join(map(str, self.parameters))
    return "{0}:{1}".format(self.index, joined_params)
Return inline string format of the instance :return:
def set_fft_params(func):
    """Decorate a method to automatically convert quantities to samples."""
    @wraps(func)
    def wrapped_func(series, method_func, *args, **kwargs):
        # A tuple input carries the data series in its first slot.
        data = series[0] if isinstance(series, tuple) else series
        # Fill in FFT defaults (in samples) derived from the data.
        normalize_fft_params(data, kwargs=kwargs, func=method_func)
        return func(series, method_func, *args, **kwargs)
    return wrapped_func
Decorate a method to automatically convert quantities to samples
def get_field_context(self, bound_field):
    """Build the template context used to render *bound_field* to HTML.

    The context exposes: the form, the bound field, the label-ready
    field id, field name, errors, required flag, label text, label and
    container CSS classes, the (safe) help text, the lowercased widget
    class name, and the widget input type (falling back to the widget
    class name).

    :return: dict used as template context when rendering the field.
    """
    widget = bound_field.field.widget
    widget_class_name = widget.__class__.__name__.lower()

    # Prefer an explicit widget id; fall back to the auto-generated one.
    field_id = widget.attrs.get('id') or bound_field.auto_id
    if field_id:
        field_id = widget.id_for_label(field_id)

    help_text = mark_safe(bound_field.help_text) if bound_field.help_text else None
    input_type = getattr(widget, 'input_type', None) or widget_class_name

    return {
        'form': self,
        'field': bound_field,
        'field_id': field_id,
        'field_name': bound_field.name,
        'errors': bound_field.errors,
        'required': bound_field.field.required,
        'label': bound_field.label,
        'label_css_class': self.get_field_label_css_class(bound_field),
        'help_text': help_text,
        'container_css_class': self.get_field_container_css_class(bound_field),
        'widget_class_name': widget_class_name,
        'widget_input_type': input_type,
    }
Returns the context which is used when rendering a form field to HTML. The generated template context will contain the following variables: * form: `Form` instance * field: `BoundField` instance of the field * field_id: Field ID to use in `<label for="..">` * field_name: Name of the form field to render * errors: `ErrorList` instance with errors of the field * required: Boolean flag to signal if the field is required or not * label: The label text of the field * label_css_class: The optional label CSS class, might be `None` * help_text: Optional help text for the form field. Might be `None` * container_css_class: The CSS class for the field container. * widget_class_name: Lowercased version of the widget class name (e.g. 'textinput') * widget_input_type: `input_type` property of the widget instance, falls back to `widget_class_name` if not available. :return: Template context for field rendering.
def wait_for_a_future(futures, print_traceback=False):
    """Block until any one of *futures* completes, and return it.

    Polls ``as_completed`` with a long timeout so KeyboardInterrupt can
    be serviced; on Ctrl-C the entire process exits immediately.

    :param futures: iterable of concurrent.futures.Future objects.
    :param print_traceback: if True, dump the current stack before
        exiting on KeyboardInterrupt; otherwise print a blank line.
    :return: the first future that completed.
    """
    while True:
        try:
            # THREAD_TIMEOUT_MAX is a module-level constant; the timeout
            # exists only so the loop wakes up to notice interrupts.
            future = next(concurrent.futures.as_completed(futures, timeout=THREAD_TIMEOUT_MAX))
            break
        except concurrent.futures.TimeoutError:
            # Nothing finished within the window -- poll again.
            pass
        except KeyboardInterrupt:
            if print_traceback:
                traceback.print_stack()
            else:
                print('')
            # Hard exit: deliberately skips finally blocks and atexit handlers.
            os._exit(os.EX_IOERR)
    return future
Return the next future that completes. If a KeyboardInterrupt is received, then the entire process is exited immediately. See wait_for_all_futures for more notes.
def submit_reading(basename, pmid_list_filename, readers, start_ix=None,
                   end_ix=None, pmids_per_job=3000, num_tries=2,
                   force_read=False, force_fulltext=False, project_name=None):
    """Submit an old-style pmid-centered no-database s3 only reading job.

    Kept for backward compatibility; prefer constructing a
    ``PmidSubmitter`` and calling its ``submit_reading`` directly.

    :return: the submitter's job list.
    """
    submitter = PmidSubmitter(basename, readers, project_name)
    submitter.set_options(force_read, force_fulltext)
    submitter.submit_reading(pmid_list_filename, start_ix, end_ix,
                             pmids_per_job, num_tries)
    return submitter.job_list
Submit an old-style pmid-centered no-database s3 only reading job. This function is provided for the sake of backward compatibility. It is preferred that you use the object-oriented PmidSubmitter and the submit_reading job going forward.
def reset(self):
    """Reset the barrier to the initial state.

    Any threads currently waiting will get the BrokenBarrier exception
    raised.
    """
    with self._cond:
        if self._count > 0:
            # Threads are waiting: mark the barrier broken (-1) so they
            # raise; a barrier already broken (-1) is left as-is.
            if self._state in (0, -2):
                self._state = -1
        else:
            # Nobody waiting: simply return to the pristine state.
            self._state = 0
        self._cond.notify_all()
Reset the barrier to the initial state. Any threads currently waiting will get the BrokenBarrier exception raised.
def get_media_urls(tweet):
    """Get the https link for each media entity in the tweet.

    Args:
        tweet (Tweet or dict): tweet

    Returns:
        list: the "media_url_https" value of every media entity; an
        empty list when the tweet has no media.
    """
    entities = get_media_entities(tweet)
    if not entities:
        return []
    return [item.get("media_url_https") for item in entities]
Gets the https links to each media entity in the tweet. Args: tweet (Tweet or dict): tweet Returns: list: list of urls. Will be an empty list if there are no urls present. Example: >>> from tweet_parser.getter_methods.tweet_entities import get_media_urls >>> tweet = {'created_at': '2017-21-23T15:21:21.000Z', ... 'entities': {'user_mentions': [{'id': 2382763597, ... 'id_str': '2382763597', ... 'indices': [14, 26], ... 'name': 'Fiona', ... 'screen_name': 'notFromShrek'}]}, ... 'extended_entities': {'media': [{'display_url': 'pic.twitter.com/something', ... 'expanded_url': 'https://twitter.com/something', ... 'id': 4242, ... 'id_str': '4242', ... 'indices': [88, 111], ... 'media_url': 'http://pbs.twimg.com/media/something.jpg', ... 'media_url_https': 'https://pbs.twimg.com/media/something.jpg', ... 'sizes': {'large': {'h': 1065, 'resize': 'fit', 'w': 1600}, ... 'medium': {'h': 799, 'resize': 'fit', 'w': 1200}, ... 'small': {'h': 453, 'resize': 'fit', 'w': 680}, ... 'thumb': {'h': 150, 'resize': 'crop', 'w': 150}}, ... 'type': 'photo', ... 'url': 'https://t.co/something'}, ... {'display_url': 'pic.twitter.com/something_else', ... 'expanded_url': 'https://twitter.com/user/status/something/photo/1', ... 'id': 4243, ... 'id_str': '4243', ... 'indices': [88, 111], ... 'media_url': 'http://pbs.twimg.com/media/something_else.jpg', ... 'media_url_https': 'https://pbs.twimg.com/media/something_else.jpg', ... 'sizes': {'large': {'h': 1065, 'resize': 'fit', 'w': 1600}, ... 'medium': {'h': 799, 'resize': 'fit', 'w': 1200}, ... 'small': {'h': 453, 'resize': 'fit', 'w': 680}, ... 'thumb': {'h': 150, 'resize': 'crop', 'w': 150}}, ... 'type': 'photo', ... 'url': 'https://t.co/something_else'}]} ... } >>> get_media_urls(tweet) ['https://pbs.twimg.com/media/something.jpg', 'https://pbs.twimg.com/media/something_else.jpg']