code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def rm_(name, force=False, volumes=False, **kwargs):
    '''
    Removes a container.

    name
        Container name or ID
    force : False
        If ``True``, kill the container before removal (the Docker API will
        not remove a running container).
    stop : False
        If ``True``, stop the container before removal.
    timeout
        Optional timeout passed to ``docker.stop`` if stopping the container.
    volumes : False
        Also remove volumes associated with container.

    **RETURN DATA**

    A list of the IDs of containers which were removed.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.rm mycontainer
        salt myminion docker.rm mycontainer force=True
    '''
    kwargs = __utils__['args.clean_kwargs'](**kwargs)
    stop_ = kwargs.pop('stop', False)
    timeout = kwargs.pop('timeout', None)
    auto_remove = False
    if kwargs:
        __utils__['args.invalid_kwargs'](kwargs)
    if state(name) == 'running' and not (force or stop_):
        raise CommandExecutionError(
            'Container \'{0}\' is running, use force=True to forcibly '
            'remove this container'.format(name)
        )
    if stop_ and not force:
        # AutoRemove containers delete themselves on stop; skip the
        # explicit remove_container call for those below.
        inspect_results = inspect_container(name)
        try:
            auto_remove = inspect_results['HostConfig']['AutoRemove']
        except KeyError:
            log.error(
                'Failed to find AutoRemove in inspect results, Docker API may '
                'have changed. Full results: %s', inspect_results
            )
        stop(name, timeout=timeout)
    pre = ps_(all=True)
    if not auto_remove:
        _client_wrapper('remove_container', name, v=volumes, force=force)
    _clear_context()
    # Report the containers that disappeared between the two listings.
    return [x for x in pre if x not in ps_(all=True)]
def _get_synonym(self, line):
    """Parse an optional 'synonym' attribute line into a namedtuple.

    Example line and its storage:
        synonym: "The other white meat" EXACT MARKETING_SLOGAN [MEAT:00324, BACONBASE:03021]
    -> text="The other white meat", scope=EXACT, typename=MARKETING_SLOGAN,
       dbxrefs={"MEAT:00324", "BACONBASE:03021"}
    """
    mtch = self.attr2cmp['synonym'].match(line)
    text, scope, typename, dbxrefs, _ = mtch.groups()
    typename = typename.strip()
    # An empty dbxref list ("[]") becomes an empty set.
    dbxrefs = set(dbxrefs.split(', ')) if dbxrefs else set()
    return self.attr2cmp['synonym nt']._make([text, scope, typename, dbxrefs])
def register_backend(self, name, backend):
    """Register a new backend that will be called for each processed event.

    Backends are invoked in the order they are registered.

    :raises ValueError: if *backend* has no callable ``send`` method.
    """
    send = getattr(backend, 'send', None)
    if send is None or not callable(send):
        raise ValueError('Backend %s does not have a callable "send" method.'
                         % backend.__class__.__name__)
    self.backends[name] = backend
def serialize_properties(properties):
    """Return a copy of *properties* with JSON-unfriendly values converted.

    ``datetime`` values become ISO-8601 strings; any other value that is
    not a basic JSON-compatible type is stringified with ``str()``.

    Parameters
    ----------
    properties : dict
        Properties to serialize.
    """
    new_properties = properties.copy()
    for attr_name, attr_value in new_properties.items():
        if isinstance(attr_value, datetime):
            new_properties[attr_name] = attr_value.isoformat()
        elif not isinstance(attr_value, (dict, list, tuple, str, int, float,
                                         bool, type(None))):
            new_properties[attr_name] = str(attr_value)
    return new_properties
def sleep(self):
    """Wait until ``self.next_time`` (from the last response) to avoid
    being rate limited. Returns immediately if the time has passed or is
    unset."""
    if self.next_time and time.time() < self.next_time:
        time.sleep(self.next_time - time.time())
def get_content(self, zipbundle):
    """Yield ``(text, filename, encoding)`` for each file in *zipbundle*.

    The encoding is detected via ``self._analyze_file``; when detection
    fails, ``self.default_encoding`` is used.
    """
    for content, filename in self.get_zip_content(zipbundle):
        with io.BytesIO(content) as b:
            encoding = self._analyze_file(b)
            if encoding is None:
                encoding = self.default_encoding
            b.seek(0)  # rewind: _analyze_file may have consumed the stream
            text = b.read().decode(encoding)
            yield text, filename, encoding
def disable_hyperthread(self):
    """Disable all threads attached to the same core.

    For every online CPU, all but the first entry of its sysfs
    ``thread_siblings_list`` are taken offline.
    """
    to_disable = []
    online_cpus = self.__get_ranges("online")
    for cpu in online_cpus:
        fpath = path.join("cpu%i" % cpu, "topology", "thread_siblings_list")
        # Keep the first sibling (the "real" core); queue the rest.
        to_disable += self.__get_ranges(fpath)[1:]
    # Only touch CPUs that are actually online.
    to_disable = set(to_disable) & set(online_cpus)
    for cpu in to_disable:
        fpath = path.join("cpu%i" % cpu, "online")
        self.__write_cpu_file(fpath, b"0")
def maybe_infer_freq(freq):
    """Normalize *freq*, detecting the sentinel string "infer".

    Comparing a DateOffset to the string "infer" raises, so the comparison
    is only done when *freq* is not already a DateOffset. When inference is
    requested, ``freq`` is set to None to avoid comparison trouble later.

    Parameters
    ----------
    freq : {DateOffset, None, str}

    Returns
    -------
    freq : {DateOffset, None}
    freq_infer : bool
    """
    freq_infer = False
    if not isinstance(freq, DateOffset):
        if freq != 'infer':
            freq = frequencies.to_offset(freq)
        else:
            freq_infer = True
            freq = None
    return freq, freq_infer
def _get_xml_rpc():
    """Connect to the OpenNebula API using the cloud provider config.

    Returns the XML-RPC server proxy together with the user and password
    values read from the cloud provider configuration.
    """
    vm_ = get_configured_provider()
    xml_rpc = config.get_cloud_config_value(
        'xml_rpc', vm_, __opts__, search_global=False
    )
    user = config.get_cloud_config_value(
        'user', vm_, __opts__, search_global=False
    )
    password = config.get_cloud_config_value(
        'password', vm_, __opts__, search_global=False
    )
    server = salt.ext.six.moves.xmlrpc_client.ServerProxy(xml_rpc)
    return server, user, password
def nla_put_u64(msg, attrtype, value):
    """Add a 64-bit integer attribute to a Netlink message.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L638

    Positional arguments:
    msg -- Netlink message (nl_msg class instance).
    attrtype -- attribute type (integer).
    value -- numeric value to store as payload (int() or c_uint64()).

    Returns:
    0 on success or a negative error code.
    """
    # Accept either a raw int or an already-wrapped c_uint64.
    data = bytearray(value if isinstance(value, c_uint64) else c_uint64(value))
    return nla_put(msg, attrtype, SIZEOF_U64, data)
def _get_linewidth(tree, linewidth, diameter_scale): if diameter_scale is not None and tree: linewidth = [2 * segment_radius(s) * diameter_scale for s in iter_segments(tree)] return linewidth
calculate the desired linewidth based on tree contents If diameter_scale exists, it is used to scale the diameter of each of the segments in the tree If diameter_scale is None, the linewidth is used.
def _comparator_eq(filter_value, tested_value):
    """Test whether the filter value equals the tested value.

    Iterable tested values match if any element equals the filter value;
    non-string values are compared through their ``repr()``.
    """
    if isinstance(tested_value, ITERABLES):
        for value in tested_value:
            if not is_string(value):
                value = repr(value)
            if filter_value == value:
                return True
    elif not is_string(tested_value):
        return filter_value == repr(tested_value)
    else:
        return filter_value == tested_value
    # Iterable with no matching element.
    return False
def update_json_analysis(analysis, j):
    """Walk JSON-like data *j*, adding leaf values to *analysis*.

    *analysis* maps dotted key paths (list levels marked with "[]") to
    sets of the values observed at that path.
    """
    def _analyze_list(l, parent=""):
        for v in l:
            if isinstance(v, (dict, CaseInsensitiveDict)):
                _analyze_json(v, parent=parent)
            elif isinstance(v, list):
                _analyze_list(v, parent=parent + "[]")
            else:
                analysis[parent].add(v)

    def _analyze_json(d, parent=""):
        # .items() (the Python 2-only .iteritems() was removed in Python 3)
        # works for both dict and CaseInsensitiveDict.
        for k, v in d.items():
            path = ".".join([parent, k]) if parent else k
            if isinstance(v, (dict, CaseInsensitiveDict)):
                _analyze_json(v, parent=path)
            elif isinstance(v, list):
                _analyze_list(v, parent=path + "[]")
            else:
                analysis[path].add(v)

    if isinstance(j, list):
        _analyze_list(j)
    if isinstance(j, (dict, CaseInsensitiveDict)):
        _analyze_json(j)
def get_args(method_or_func):
    """Return the argument names of a method or function.

    Uses ``inspect.signature``; falls back to the legacy ``getargspec``
    on interpreters where ``signature`` is unavailable.
    """
    try:
        args = list(inspect.signature(method_or_func).parameters.keys())
    except AttributeError:
        args = inspect.getargspec(method_or_func).args
    return args
def _parse_engine(self):
    """Parse the storage engine from the config.

    Returns:
        str: one of the known engine constants.

    Raises:
        ConfigError: if the configured engine is not recognized.
    """
    if self._parser.has_option('storage', 'engine'):
        engine = str(self._parser.get('storage', 'engine'))
    else:
        # Fall back to the default engine when none is configured.
        engine = ENGINE_DROPBOX
    assert isinstance(engine, str)
    if engine not in [ENGINE_DROPBOX, ENGINE_GDRIVE, ENGINE_COPY,
                      ENGINE_ICLOUD, ENGINE_BOX, ENGINE_FS]:
        raise ConfigError('Unknown storage engine: {}'.format(engine))
    return str(engine)
def alphanumeric(text):
    """Make an ultra-safe version of a string, e.g. for use as a filename.

    Keeps only characters matching ``\\w`` (alphanumerics and underscore).
    """
    # One C-level pass instead of a per-character re.match loop.
    return re.sub(r'\W', '', text)
def _from_dict(cls, _dict):
    """Initialize a ToneCategory object from a json dictionary.

    Raises:
        ValueError: if any of the required properties ('tones',
        'category_id', 'category_name') is missing.
    """
    args = {}
    if 'tones' in _dict:
        args['tones'] = [
            ToneScore._from_dict(x) for x in (_dict.get('tones'))
        ]
    else:
        raise ValueError(
            'Required property \'tones\' not present in ToneCategory JSON')
    if 'category_id' in _dict:
        args['category_id'] = _dict.get('category_id')
    else:
        raise ValueError(
            'Required property \'category_id\' not present in ToneCategory JSON'
        )
    if 'category_name' in _dict:
        args['category_name'] = _dict.get('category_name')
    else:
        raise ValueError(
            'Required property \'category_name\' not present in ToneCategory JSON'
        )
    return cls(**args)
def is_resource_class_member_attribute(rc, attr_name):
    """Check whether *attr_name* is a member attribute of the given
    registered resource class *rc*."""
    attr = get_resource_class_attribute(rc, attr_name)
    return attr.kind == RESOURCE_ATTRIBUTE_KINDS.MEMBER
def master_tops(self): log.debug( 'The _ext_nodes master function has been renamed to _master_tops. ' 'To ensure compatibility when using older Salt masters we will ' 'continue to invoke the function as _ext_nodes until the ' 'Magnesium release.' ) load = {'cmd': '_ext_nodes', 'id': self.opts['id'], 'opts': self.opts} if self.auth: load['tok'] = self.auth.gen_token(b'salt') return salt.utils.data.decode(self.channel.send(load)) if six.PY2 \ else self.channel.send(load)
Return the metadata derived from the master_tops system
def meet_challenge(self, challenge):
    """Return the SHA-256 digest of a file block plus the challenge seed.

    The block size is one tenth of the file, capped at 1 KiB. A block
    starting within one block-size of EOF wraps around to the start of the
    file so that a full block is always hashed.

    :param challenge: challenge object exposing ``seed`` and ``block``
    """
    chunk_size = min(1024, self.file_size // 10)
    seed = challenge.seed
    h = hashlib.sha256()
    self.file_object.seek(challenge.block)
    if challenge.block > (self.file_size - chunk_size):
        # Near EOF: hash the tail, then wrap to the head of the file.
        end_slice = (
            challenge.block - (self.file_size - chunk_size)
        )
        h.update(self.file_object.read(end_slice))
        self.file_object.seek(0)
        h.update(self.file_object.read(chunk_size - end_slice))
    else:
        h.update(self.file_object.read(chunk_size))
    h.update(seed)
    return h.digest()
def global_add(self, key: str, value: Any) -> None:
    """Add *key* -> *value* to the global context dictionary."""
    self.global_context[key] = value
def begin_transaction(self): self.ensure_connected() self._transaction_nesting_level += 1 if self._transaction_nesting_level == 1: self._driver.begin_transaction() elif self._nest_transactions_with_savepoints: self.create_savepoint(self._get_nested_transaction_savepoint_name())
Starts a transaction by suspending auto-commit mode.
def deregister(self, subscriber): try: logger.debug('Subscriber left') self.subscribers.remove(subscriber) except KeyError: logger.debug( 'Error removing subscriber: ' + str(subscriber))
Stop publishing to a subscriber.
def get_padding_bias(x): with tf.name_scope("attention_bias"): padding = get_padding(x) attention_bias = padding * _NEG_INF attention_bias = tf.expand_dims( tf.expand_dims(attention_bias, axis=1), axis=1) return attention_bias
Calculate bias tensor from padding values in tensor. Bias tensor that is added to the pre-softmax multi-headed attention logits, which has shape [batch_size, num_heads, length, length]. The tensor is zero at non-padding locations, and -1e9 (negative infinity) at padding locations. Args: x: int tensor with shape [batch_size, length] Returns: Attention bias tensor of shape [batch_size, 1, 1, length].
def editpermissions_user_view(self, request, user_id, forum_id=None): user_model = get_user_model() user = get_object_or_404(user_model, pk=user_id) forum = get_object_or_404(Forum, pk=forum_id) if forum_id else None context = self.get_forum_perms_base_context(request, forum) context['forum'] = forum context['title'] = '{} - {}'.format(_('Forum permissions'), user) context['form'] = self._get_permissions_form( request, UserForumPermission, {'forum': forum, 'user': user}, ) return render(request, self.editpermissions_user_view_template_name, context)
Allows to edit user permissions for the considered forum. The view displays a form to define which permissions are granted for the given user for the considered forum.
def monitor(args):
    """File monitor mode.

    Loops forever, re-parsing MDFILE whenever its modification time
    changes, printing the parsed output on change and the last error (if
    any) on every iteration.
    """
    filename = args.get('MDFILE')
    if not filename:
        print(col('Need file argument', 2))
        raise SystemExit
    last_err = ''
    last_stat = 0
    while True:
        if not os.path.exists(filename):
            last_err = 'File %s not found. Will continue trying.' % filename
        else:
            try:
                stat = os.stat(filename)[8]  # st_mtime
                if stat != last_stat:
                    parsed = run_args(args)
                    print(parsed)
                    last_stat = stat
                    last_err = ''
            except Exception as ex:  # was Python-2-only `except Exception, ex`
                last_err = str(ex)
        if last_err:
            print('Error: %s' % last_err)
        sleep()
def rename_dimension(x, old_name, new_name):
    """Reshape a Tensor, renaming one dimension.

    Args:
      x: a Tensor
      old_name: a string
      new_name: a string

    Returns:
      a Tensor
    """
    return reshape(x, x.shape.rename_dimension(old_name, new_name))
def find_or_create_role(self, name, **kwargs):
    """Return the role matching *name*, or create it with any additionally
    provided keyword parameters."""
    kwargs["name"] = name
    return self.find_role(name) or self.create_role(**kwargs)
def search_by_name(cls, name): records = aleph.downloadRecords( aleph.searchInAleph("aut", name, False, "wau") ) for record in records: marc = MARCXMLRecord(record) author = cls.parse_author(marc) if author: yield author
Look for author in NK Aleph authority base by `name`. Args: name (str): Author's name. Yields: obj: :class:`Author` instances.
def addReadGroupSet(self, readGroupSet): id_ = readGroupSet.getId() self._readGroupSetIdMap[id_] = readGroupSet self._readGroupSetNameMap[readGroupSet.getLocalId()] = readGroupSet self._readGroupSetIds.append(id_)
Adds the specified readGroupSet to this dataset.
def append(self, row_dict): entry = self.client.InsertRow(row_dict, self.key, self.worksheet) self.feed.entry.append(entry) return GDataRow(entry, sheet=self, deferred_save=self.deferred_save)
Add a row to the spreadsheet, returns the new row
def shadow(self,new_root,visitor) : for n in self.walk() : sn = n.clone(new_root) if n.isdir() : visitor.process_dir(n,sn) else : visitor.process_file(n,sn)
Runs through the query, creating a clone directory structure in the new_root. Then applies process
def print_total_timer(): if len(_TOTAL_TIMER_DATA) == 0: return for k, v in six.iteritems(_TOTAL_TIMER_DATA): logger.info("Total Time: {} -> {:.2f} sec, {} times, {:.3g} sec/time".format( k, v.sum, v.count, v.average))
Print the content of the TotalTimer, if it's not empty. This function will automatically get called when program exits.
def POINTER(obj): p = ctypes.POINTER(obj) if not isinstance(p.from_param, classmethod): def from_param(cls, x): if x is None: return cls() else: return x p.from_param = classmethod(from_param) return p
Create ctypes pointer to object. Notes ----- This function converts None to a real NULL pointer because of bug in how ctypes handles None on 64-bit platforms.
def _post_tags(self, fileobj): page = OggPage.find_last(fileobj, self.serial, finishing=True) if page is None: raise OggVorbisHeaderError self.length = page.position / float(self.sample_rate)
Raises ogg.error
def broker_url(self):
    """Return an AMQP "broker URL" for use with Celery."""
    return 'amqp://{}:{}@{}/{}'.format(
        self.user, self.password, self.name, self.vhost)
def __make_scubadir(self): self.__scubadir_hostpath = tempfile.mkdtemp(prefix='scubadir') self.__scubadir_contpath = '/.scuba' self.add_volume(self.__scubadir_hostpath, self.__scubadir_contpath)
Make temp directory where all ancillary files are bind-mounted
def clean_uri(self): if self.instance.fixed: return self.instance.uri uri = self.cleaned_data['uri'] return uri
Validates the URI
def _set_cache_(self, attr): if attr == "size": oinfo = self.repo.odb.info(self.binsha) self.size = oinfo.size else: super(Object, self)._set_cache_(attr)
Retrieve object information
def json2pb(cls, json, strict=False): return dict2pb(cls, simplejson.loads(json), strict)
Takes a class representing the Protobuf Message and fills it with data from the json string.
def collides_axisaligned_rect(self, other): self_shifted = RotoOriginRect(self.width, self.height, -self.angle) s_a = self.sin_a() c_a = self.cos_a() center_x = self.x + self.width / 2.0 * c_a - self.height / 2.0 * s_a center_y = self.y - self.height / 2.0 * c_a - self.width / 2.0 * s_a other_shifted = Rect(other.x - center_x, other.y - center_y, other.width, other.height) return self_shifted.collides(other_shifted)
Returns collision with axis aligned other rect
def pipool(name, ivals): name = stypes.stringToCharP(name) n = ctypes.c_int(len(ivals)) ivals = stypes.toIntVector(ivals) libspice.pipool_c(name, n, ivals)
This entry point provides toolkit programmers a method for programmatically inserting integer data into the kernel pool. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pipool_c.html :param name: The kernel pool name to associate with values. :type name: str :param ivals: An array of integers to insert into the pool. :type ivals: Array of ints
def routeargs(path, host = None, vhost = None, method = [b'POST'], **kwargs): "For extra arguments, see Dispatcher.routeargs. They must be specified by keyword arguments" def decorator(func): func.routemode = 'routeargs' func.route_path = path func.route_host = host func.route_vhost = vhost func.route_method = method func.route_kwargs = kwargs return func return decorator
For extra arguments, see Dispatcher.routeargs. They must be specified by keyword arguments
def make_predicate_object_combinator(function, p, o):
    """Hold a (predicate, object) pair until a subject is supplied.

    Returns a one-argument callable that invokes ``function(subject, p, o)``.
    The naming tells you what is stored IN the combinator; the argument to
    the combinator is the piece that is missing.
    """
    def predicate_object_combinator(subject):
        return function(subject, p, o)
    return predicate_object_combinator
def get_controller_info_records(self): info_records = [] for controller_module_name in self._controller_objects.keys(): with expects.expect_no_raises( 'Failed to collect controller info from %s' % controller_module_name): record = self._create_controller_info_record( controller_module_name) if record: info_records.append(record) return info_records
Get the info records for all the controller objects in the manager. New info records for each controller object are created for every call so the latest info is included. Returns: List of records.ControllerInfoRecord objects. Each object contains the info of one type of controller.
def error(self, **kwargs): exception_header_width = 100 e = Error(**kwargs) e.module = self.__class__.__name__ self.errors.append(e) if e.exception: sys.stderr.write("\n" + e.module + " Exception: " + str(e.exception) + "\n") sys.stderr.write("-" * exception_header_width + "\n") traceback.print_exc(file=sys.stderr) sys.stderr.write("-" * exception_header_width + "\n\n") elif e.description: sys.stderr.write("\n" + e.module + " Error: " + e.description + "\n\n")
Stores the specified error in self.errors. Accepts the same kwargs as the binwalk.core.module.Error class. Returns None.
def get_start_date(self): sdate = self._my_map['startDate'] return DateTime( sdate.year, sdate.month, sdate.day, sdate.hour, sdate.minute, sdate.second, sdate.microsecond)
Gets the start date. return: (osid.calendaring.DateTime) - the start date *compliance: mandatory -- This method must be implemented.*
def serializable_dict(d):
    """Return a dict like *d* with un-JSON-serializable values removed.

    Nested dicts are filtered recursively; any value that ``json.dumps``
    rejects is dropped.
    """
    newd = {}
    for k, v in d.items():
        if isinstance(v, dict):
            newd[k] = serializable_dict(v)
            continue
        try:
            json.dumps(v)
        except (TypeError, ValueError):  # was a bare `except:` — too broad
            continue
        newd[k] = v
    return newd
def get_user(self, name): r = self.kraken_request('GET', 'user/' + name) return models.User.wrap_get_user(r)
Get the user for the given name :param name: The username :type name: :class:`str` :returns: the user instance :rtype: :class:`models.User` :raises: None
def login(self, username, password, application, application_url): logger.debug(str((username, application, application_url))) method = self._anaconda_client_api.authenticate return self._create_worker(method, username, password, application, application_url)
Login to anaconda cloud.
def median(ls):
    """Return the median of list *ls*.

    NOTE: for even-length input this returns the upper of the two middle
    elements (preserving the original behavior), not their mean.
    """
    # len(ls) // 2 replaces the original int(floor(len(ls) / 2.0)).
    return sorted(ls)[len(ls) // 2]
def PyParseIntCast(string, location, tokens): for index, token in enumerate(tokens): try: tokens[index] = int(token) except ValueError: logger.error('Unable to cast [{0:s}] to an int, setting to 0'.format( token)) tokens[index] = 0 for key in tokens.keys(): try: tokens[key] = int(tokens[key], 10) except ValueError: logger.error( 'Unable to cast [{0:s} = {1:d}] to an int, setting to 0'.format( key, tokens[key])) tokens[key] = 0
Return an integer from a string. This is a pyparsing callback method that converts the matched string into an integer. The method modifies the content of the tokens list and converts them all to an integer value. Args: string (str): original string. location (int): location in the string where the match was made. tokens (list[str]): extracted tokens, where the string to be converted is stored.
def has_operator_manifest(self): dockerfile = df_parser(self.workflow.builder.df_path, workflow=self.workflow) labels = Labels(dockerfile.labels) try: _, operator_label = labels.get_name_and_value(Labels.LABEL_TYPE_OPERATOR_MANIFESTS) except KeyError: operator_label = 'false' return operator_label.lower() == 'true'
Check if Dockerfile sets the operator manifest label :return: bool
def dump(filename, options, out=sys.stdout): with open(filename, 'rb') as file_obj: return _dump(file_obj, options=options, out=out)
Dump parquet file with given filename using options to `out`.
def predict(self, data, unkown=None): assert self.classifier is not None, 'not calibrated' bmus = self._som.bmus(data) return self._predict_from_bmus(bmus, unkown)
\ Classify data according to previous calibration. :param data: sparse input matrix (ideal dtype is `numpy.float32`) :type data: :class:`scipy.sparse.csr_matrix` :param unkown: the label to attribute if no label is known :returns: the labels guessed for data :rtype: `numpy.array`
def display_vega(vega_data: dict, display: bool = True) -> Union['Vega', dict]: if VEGA_IPYTHON_PLUGIN_ENABLED and display: from vega3 import Vega return Vega(vega_data) else: return vega_data
Optionally display vega dictionary. Parameters ---------- vega_data : Valid vega data as dictionary display: Whether to try in-line display in IPython
def _apply_snap_off(self, queue=None): r net = self.project.network phase = self.project.find_phase(self) snap_off = self.settings['snap_off'] if queue is None: queue = self.queue[0] try: Pc_snap_off = phase[snap_off] logger.info("Adding snap off pressures to queue") for T in net.throats(): if not np.isnan(Pc_snap_off[T]): hq.heappush(queue, [Pc_snap_off[T], T, 'throat']) except KeyError: logger.warning("Phase " + phase.name + " doesn't have " + "property " + snap_off)
r""" Add all the throats to the queue with snap off pressure This is probably wrong!!!! Each one needs to start a new cluster.
def _ExtractJQuery(self, jquery_raw): data_part = '' if not jquery_raw: return {} if '[' in jquery_raw: _, _, first_part = jquery_raw.partition('[') data_part, _, _ = first_part.partition(']') elif jquery_raw.startswith('//'): _, _, first_part = jquery_raw.partition('{') data_part = '{{{0:s}'.format(first_part) elif '({' in jquery_raw: _, _, first_part = jquery_raw.partition('(') data_part, _, _ = first_part.rpartition(')') if not data_part: return {} try: data_dict = json.loads(data_part) except ValueError: return {} return data_dict
Extracts values from a JQuery string. Args: jquery_raw (str): JQuery string. Returns: dict[str, str]: extracted values.
def rfftn(a, s=None, axes=None, norm=None): unitary = _unitary(norm) if unitary: a = asarray(a) s, axes = _cook_nd_args(a, s, axes) output = mkl_fft.rfftn_numpy(a, s, axes) if unitary: n_tot = prod(asarray(s, dtype=output.dtype)) output *= 1 / sqrt(n_tot) return output
Compute the N-dimensional discrete Fourier Transform for real input. This function computes the N-dimensional discrete Fourier Transform over any number of axes in an M-dimensional real array by means of the Fast Fourier Transform (FFT). By default, all axes are transformed, with the real transform performed over the last axis, while the remaining transforms are complex. Parameters ---------- a : array_like Input array, taken to be real. s : sequence of ints, optional Shape (length along each transformed axis) to use from the input. (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). The final element of `s` corresponds to `n` for ``rfft(x, n)``, while for the remaining axes, it corresponds to `n` for ``fft(x, n)``. Along any axis, if the given shape is smaller than that of the input, the input is cropped. If it is larger, the input is padded with zeros. if `s` is not given, the shape of the input along the axes specified by `axes` is used. axes : sequence of ints, optional Axes over which to compute the FFT. If not given, the last ``len(s)`` axes are used, or all axes if `s` is also not specified. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : complex ndarray The truncated or zero-padded input, transformed along the axes indicated by `axes`, or by a combination of `s` and `a`, as explained in the parameters section above. The length of the last axis transformed will be ``s[-1]//2+1``, while the remaining transformed axes will have lengths according to `s`, or unchanged from the input. Raises ------ ValueError If `s` and `axes` have different length. IndexError If an element of `axes` is larger than than the number of axes of `a`. See Also -------- irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT of real input. fft : The one-dimensional FFT, with definitions and conventions used. rfft : The one-dimensional FFT of real input. 
fftn : The n-dimensional FFT. rfft2 : The two-dimensional FFT of real input. Notes ----- The transform for real input is performed over the last transformation axis, as by `rfft`, then the transform over the remaining axes is performed as by `fftn`. The order of the output is as for `rfft` for the final transformation axis, and as for `fftn` for the remaining transformation axes. See `fft` for details, definitions and conventions used. Examples -------- >>> a = np.ones((2, 2, 2)) >>> np.fft.rfftn(a) array([[[ 8.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j]], [[ 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j]]]) >>> np.fft.rfftn(a, axes=(2, 0)) array([[[ 4.+0.j, 0.+0.j], [ 4.+0.j, 0.+0.j]], [[ 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j]]])
def prepare_dependencies(self): attrs = [ ('bsources', 'bsourcesigs'), ('bdepends', 'bdependsigs'), ('bimplicit', 'bimplicitsigs'), ] for (nattr, sattr) in attrs: try: strings = getattr(self, nattr) nodeinfos = getattr(self, sattr) except AttributeError: continue if strings is None or nodeinfos is None: continue nodes = [] for s, ni in zip(strings, nodeinfos): if not isinstance(s, SCons.Node.Node): s = ni.str_to_node(s) nodes.append(s) setattr(self, nattr, nodes)
Prepares a FileBuildInfo object for explaining what changed The bsources, bdepends and bimplicit lists have all been stored on disk as paths relative to the top-level SConstruct directory. Convert the strings to actual Nodes (for use by the --debug=explain code and --implicit-cache).
def unpack(data): size, position = decoder._DecodeVarint(data, 0) envelope = wire.Envelope() envelope.ParseFromString(data[position:position+size]) return envelope
unpack from delimited data
def app(config, src, dst, features, reload, force): config = Path(config) if reload: argv = sys.argv.copy() argv.remove('--reload') monitor(config.dirname(), src, dst, argv) else: run(config, src, dst, force)
Takes several files or directories as src and generates the code in the given dst directory.
def cache_request_user(user_cls, request, user_id): pk_field = user_cls.pk_field() user = getattr(request, '_user', None) if user is None or getattr(user, pk_field, None) != user_id: request._user = user_cls.get_item(**{pk_field: user_id})
Helper function to cache currently logged in user. User is cached at `request._user`. Caching happens only only if user is not already cached or if cached user's pk does not match `user_id`. :param user_cls: User model class to use for user lookup. :param request: Pyramid Request instance. :user_id: Current user primary key field value.
def delete(self): response = self.hv.delete_request('people/' + str(self.id)) return response
Deletes the person immediately.
def df_first_row_to_dict(df): if df is not None: return [dict(r) for i, r in df.head(1).iterrows()][0]
First DataFrame row to list of dict Args: df (pandas.DataFrame): A DataFrame with at least one row Returns: A list of dict that looks like: [{'C1': 'x'}, {'C2': 'y'}, {'C3': 'z'}] from a DataFrame that looks like: C1 C2 C3 1 x y z Else if `df` is `None`, returns `None`
def parseGeometry(self, geometry): self.coordinates = [] self.index = [] self.position = 0 self.lastX = 0 self.lastY = 0 self.isPoly = False self.isPoint = True; self.dropped = 0; self.first = True self._current_string = geometry reader = _ExtendedUnPacker(geometry) self._dispatchNextType(reader)
A factory method for creating objects of the correct OpenGIS type.
def owners(self): result = set() for role in self._OWNER_ROLES: for member in self._bindings.get(role, ()): result.add(member) return frozenset(result)
Legacy access to owner role. DEPRECATED: use ``policy["roles/owners"]`` instead.
def projC(gamma, q):
    """Return the KL projection of *gamma* onto the column constraints *q*.

    Each column of *gamma* is rescaled so its sum matches the corresponding
    entry of *q*; the 1e-10 floor guards against division by zero on empty
    columns.
    """
    return np.multiply(gamma, q / np.maximum(np.sum(gamma, axis=0), 1e-10))
def get_widget_title(tab_label_text):
    """Transform a Notebook tab label into a title.

    Underscores become spaces and each word is upper-cased.

    :param tab_label_text: the tab label string to transform
    :return: the transformed title as a string
    """
    # join() fixes the original bug where the result of `title.strip()`
    # was discarded, leaving a trailing space on the title.
    return ' '.join(word.upper() for word in tab_label_text.split('_'))
def apply_constraint(self,constraint,selectfrac_skip=False, distribution_skip=False,overwrite=False): constraints = self.constraints my_selectfrac_skip = self.selectfrac_skip my_distribution_skip = self.distribution_skip if constraint.name in constraints and not overwrite: logging.warning('constraint already applied: {}'.format(constraint.name)) return constraints[constraint.name] = constraint if selectfrac_skip: my_selectfrac_skip.append(constraint.name) if distribution_skip: my_distribution_skip.append(constraint.name) if hasattr(self, '_make_kde'): self._make_kde() self.constraints = constraints self.selectfrac_skip = my_selectfrac_skip self.distribution_skip = my_distribution_skip
Apply a constraint to the population :param constraint: Constraint to apply. :type constraint: :class:`Constraint` :param selectfrac_skip: (optional) If ``True``, then this constraint will not be considered towards diminishing the
def getWmg2(self, prefcounts, ordering, state, normalize=False):
    """Generate the weighted majority graph for the whole profile.

    Returns a two-level dict mapping each ordered pair of candidates
    ``(cand1, cand2)`` to the number of times cand1 is ranked above
    cand2, weighted by ``prefcounts``.

    :ivar bool normalize: if truthy, every edge is divided by the value
        of the largest edge in the graph.
    """
    # Initialize every ordered pair of distinct candidates to zero.
    wmgMap = {cand: {} for cand in state}
    for cand1, cand2 in itertools.combinations(state, 2):
        wmgMap[cand1][cand2] = 0
        wmgMap[cand2][cand1] = 0
    # Each ranking contributes its multiplicity to every (higher, lower)
    # pair it contains.  zip replaces the C-style index loop.
    for count, ranking in zip(prefcounts, ordering):
        for cand1, cand2 in itertools.combinations(ranking, 2):
            wmgMap[cand1][cand2] += count
    # Idiom fix: `if normalize == True` -> truthiness test.
    if normalize:
        # Divide every edge by the single largest edge weight.
        maxEdge = max(max(edges.values()) for edges in wmgMap.values())
        for cand1 in wmgMap:
            for cand2 in wmgMap[cand1]:
                wmgMap[cand1][cand2] = float(wmgMap[cand1][cand2]) / maxEdge
    return wmgMap
Generate a weighted majority graph that represents the whole profile. The function will return a two-dimensional dictionary that associates integer representations of each pair of candidates, cand1 and cand2, with the number of times cand1 is ranked above cand2 minus the number of times cand2 is ranked above cand1. :ivar bool normalize: If normalize is True, the function will return a normalized graph where each edge has been divided by the value of the largest edge.
def prompt_for_password(url, user=None, default_user=None):
    """Prompt on the console for username and password.

    If ``user`` is passed, only the password is prompted for.

    Args:
        url (str): hostname, shown in the prompts.
        user (str, optional): pass a valid name to skip the username prompt.
        default_user (str, optional): default offered when prompting for
            the username; falls back to the OS login name.
    Raises:
        KeyboardInterrupt: if the user hits Ctrl+C.
    Returns:
        (username, password) tuple, or None if no username was entered.
    """
    if user is None:
        default_user = default_user or getpass.getuser()
    while user is None:
        user = compat.console_input(
            "Enter username for {} [{}]: ".format(url, default_user)
        )
        # Empty input selects the default; loop again if there is none.
        if user.strip() == "" and default_user:
            user = default_user
    if user:
        pw = getpass.getpass(
            "Enter password for {}@{} (Ctrl+C to abort): ".format(user, url)
        )
        # An explicitly empty password is still accepted and returned.
        if pw or pw == "":
            return (user, pw)
    return None
Prompt for username and password. If a user name is passed, only prompt for a password. Args: url (str): hostname user (str, optional): Pass a valid name to skip prompting for a user name default_user (str, optional): Pass a valid name that is used as default when prompting for a user name Raises: KeyboardInterrupt if user hits Ctrl-C Returns: (username, password) or None
def save_conf(fn=None):
    """Persist the current configuration to ``fn`` as YAML.

    When ``fn`` is None, the file inside the current config directory
    (``confdir``, settable via INTAKE_CONF_DIR) is used.
    """
    target = cfile() if fn is None else fn
    # Best effort: the directory may already exist or be uncreatable.
    try:
        os.makedirs(os.path.dirname(target))
    except (OSError, IOError):
        pass
    with open(target, 'w') as handle:
        yaml.dump(conf, handle)
Save current configuration to file as YAML If not given, uses current config directory, ``confdir``, which can be set by INTAKE_CONF_DIR.
def groupby(xs, key_fn):
    """Group elements of ``xs`` by the key produced by ``key_fn``.

    :return: a defaultdict mapping each key to the sub-list of elements
        of ``xs`` with that key, in input order.
    """
    grouped = defaultdict(list)
    for item in xs:
        grouped[key_fn(item)].append(item)
    return grouped
Group elements of the list `xs` by keys generated from calling `key_fn`. Returns a dictionary which maps keys to sub-lists of `xs`.
def arrows_at(self, x, y):
    """Lazily yield every arrow that collides with the point (x, y)."""
    yield from (arrow for arrow in self.arrows()
                if arrow.collide_point(x, y))
Iterate over arrows that collide the given point.
def set_property(self, key, value):
    """Update a single entry in ``self.properties`` and push the change.

    :param key: property name to set.
    :param value: new value for the property.
    """
    self.properties[key] = value
    # Persist/propagate the updated properties dict immediately.
    self.sync_properties()
Update only one property in the dict
def console_input(default, validation=None, allow_empty=False):
    """Read a value from stdin, falling back to ``default`` on empty input.

    Python 2 only (uses ``raw_input`` and the print statement).

    Parameters
    ----------
    default : string
        Value used when the user enters nothing.
    validation : callable, optional
        Must raise ValidationError when the value is invalid.
    allow_empty : bool, optional
        If False, an empty final value triggers a re-prompt.

    Returns
    -------
    string or any
        The raw input string, or the value returned by ``validation``.
    """
    value = raw_input("> ") or default
    # Re-prompt (via recursion) until a non-empty value is supplied,
    # unless empty values are explicitly allowed.
    # NOTE(review): the recursive retries drop allow_empty -- probably
    # it should be passed through; confirm intended behavior.
    if value == "" and not allow_empty:
        print "Invalid: Empty value is not permitted."
        return console_input(default, validation)
    if validation:
        try:
            return validation(value)
        except ValidationError, e:
            print "Invalid: ", e
            # Re-prompt on validation failure.
            return console_input(default, validation)
    return value
Get user input value from stdin Parameters ---------- default : string A default value. It will be used when user input nothing. validation : callable A validation function. The validation function must raise an error when validation has failed. Returns ------- string or any A user input string or validated value
def decompress(data, compression, width, height, depth, version=1):
    """Decompress raw channel data.

    :param data: compressed data bytes.
    :param compression: compression type, see
        :py:class:`~psd_tools.constants.Compression`.
    :param width: image width.
    :param height: image height.
    :param depth: bit depth of the pixel.
    :param version: psd file version.
    :return: decompressed data bytes.
    """
    expected_length = width * height * depth // 8
    if compression == Compression.RAW:
        result = data[:expected_length]
    elif compression == Compression.PACK_BITS:
        result = decode_packbits(data, height, version)
    elif compression == Compression.ZIP:
        result = zlib.decompress(data)
    else:
        # ZIP-with-prediction: inflate first, then undo the per-row
        # prediction encoding.
        inflated = zlib.decompress(data)
        result = decode_prediction(inflated, width, height, depth)
    assert len(result) == expected_length, 'len=%d, expected=%d' % (
        len(result), expected_length
    )
    return result
Decompress raw data. :param data: compressed data bytes. :param compression: compression type, see :py:class:`~psd_tools.constants.Compression`. :param width: width. :param height: height. :param depth: bit depth of the pixel. :param version: psd file version. :return: decompressed data bytes.
def fake_chars_or_choice(self, field_name):
    """Return fake characters for ``field_name``, or a random choice when
    the field defines ``choices``.  Intended for ``CharField``.

    Usage::

        faker.fake_chars_or_choice('field_name')

    :param field_name: name of the model field to fake.
    """
    # Delegates to the project's field faker; the choice-vs-chars
    # decision is made inside randomCharField.
    return self.djipsum_fields().randomCharField(
        self.model_class(), field_name=field_name
    )
Return fake chars or choice it if the `field_name` has choices. Then, returning random value from it. This specially for `CharField`. Usage: faker.fake_chars_or_choice('field_name') Example for field: TYPE_CHOICES = ( ('project', 'I wanna to talk about project'), ('feedback', 'I want to report a bugs or give feedback'), ('hello', 'I just want to say hello') ) type = models.CharField(max_length=200, choices=TYPE_CHOICES)
def configure_roles_on_host(api, host):
    """Configure every role on ``host`` whose type we manage.

    Only DATANODE, TASKTRACKER and REGIONSERVER roles are updated; all
    other role types (and role refs without a cluster) are skipped.
    """
    # Map each managed role type to the configuration it should receive.
    conf_by_type = {
        'DATANODE': DATANODE_CONF,
        'TASKTRACKER': TASKTRACKER_CONF,
        'REGIONSERVER': REGIONSERVER_CONF,
    }
    for role_ref in host.roleRefs:
        cluster_name = role_ref.get('clusterName')
        if cluster_name is None:
            continue
        cluster = api.get_cluster(cluster_name)
        service = cluster.get_service(role_ref['serviceName'])
        role = service.get_role(role_ref['roleName'])
        LOG.debug("Evaluating %s (%s)" % (role.name, host.hostname))
        config = conf_by_type.get(role.type)
        if config is None:
            continue
        LOG.info("Configuring %s (%s)" % (role.name, host.hostname))
        role.update_config(config)
Go through all the roles on this host, and configure them if they match the role types that we care about.
def Task(func, *args, **kwargs):
    """Adapt a callback-style asynchronous function for use in coroutines.

    Runs ``func(*args, callback=..., **kwargs)`` and returns a `.Future`
    that resolves with the value passed to the callback, or with any
    exception raised while the operation is in flight.

    .. versionchanged:: 4.0
       ``gen.Task`` is now a function that returns a `.Future`, instead
       of a subclass of `YieldPoint`.  It still behaves the same way
       when yielded.
    """
    future = Future()

    def handle_exception(typ, value, tb):
        # Ignore exceptions that arrive after the future is resolved.
        if future.done():
            return False
        future.set_exc_info((typ, value, tb))
        # True tells the stack context the exception was consumed.
        return True

    def set_result(result):
        # A late/duplicate callback must not resolve the future twice.
        if future.done():
            return
        future.set_result(result)
    # ExceptionStackContext catches exceptions raised asynchronously by
    # func after this frame has already returned.
    with stack_context.ExceptionStackContext(handle_exception):
        func(*args, callback=_argument_adapter(set_result), **kwargs)
    return future
Adapts a callback-based asynchronous function for use in coroutines. Takes a function (and optional additional arguments) and runs it with those arguments plus a ``callback`` keyword argument. The argument passed to the callback is returned as the result of the yield expression. .. versionchanged:: 4.0 ``gen.Task`` is now a function that returns a `.Future`, instead of a subclass of `YieldPoint`. It still behaves the same way when yielded.
def contains(self, location):
    """Check that the provided point lies on the sphere's surface."""
    squared_distance = sum(coord ** 2 for coord in location)
    return self.almostEqual(squared_distance, self.radius ** 2)
Checks that the provided point is on the sphere.
def uploads(self):
    """Return an Uploads helper for this site, or None when the service
    exposes no ``uploads`` resource.
    """
    # Lazily populate the resource list on first access.
    if self._resources is None:
        self.__init()
    if "uploads" in self._resources:
        url = self._url + "/uploads"
        return _uploads.Uploads(url=url,
                                securityHandler=self._securityHandler,
                                proxy_url=self._proxy_url,
                                proxy_port=self._proxy_port,
                                initialize=True)
    else:
        return None
returns an object to work with the site uploads
def delete_queue(name, region, opts=None, user=None):
    """Delete an SQS queue in the given region.

    name
        Name of the SQS queue to delete.
    region
        Name of the region to delete the queue from.
    opts : None
        Any additional options to add to the command line.
    user : None
        Run the aws command as a user other than what the minion runs as.

    :return: dict with ``retcode`` (0 on success, 1 on failure),
        ``stdout`` and ``stderr`` keys.

    CLI Example:

        salt '*' aws_sqs.delete_queue <sqs queue> <region>
    """
    queues = list_queues(region, opts, user)
    url_map = _parse_queue_list(queues)
    log.debug('map %s', url_map)
    if name in url_map:
        delete = {'queue-url': url_map[name]}
        # Fix: the original bound the result to an unused local `rtn`;
        # the command output is not inspected -- success is assumed
        # unless _run_aws raises.
        _run_aws(
            'delete-queue',
            region=region,
            opts=opts,
            user=user,
            **delete)
        success = True
        err = ''
        out = '{0} deleted'.format(name)
    else:
        out = ''
        err = "Delete failed"
        success = False
    return {
        'retcode': 0 if success else 1,
        'stdout': out,
        'stderr': err,
    }
Deletes a queue in the region. name Name of the SQS queue to delete region Name of the region to delete the queue from opts : None Any additional options to add to the command line user : None Run aws as a user other than what the minion runs as CLI Example: salt '*' aws_sqs.delete_queue <sqs queue> <region>
def serialize(self, data):
    """Serialize ``data`` with the dumper negotiated from the Accept
    header, setting the matching Content-Type response header first.

    :param data: the payload to serialize.
    :return: the serialized representation produced by ``self.dumper``.
    """
    self.set_header('Content-Type', self.content_type)
    return self.dumper(data)
Serlialize the output based on the Accept header
def get_indexable(cls):
    """Return the ids of all objects to be indexed.

    Equivalent to::

        cls.get_model().objects.order_by('id').values_list('id', flat=True)

    :returns: iterable (Django queryset) of ids of objects to be indexed.
    """
    model = cls.get_model()
    # Stable id ordering lets callers chunk the id stream deterministically.
    return model.objects.order_by('id').values_list('id', flat=True)
Returns the queryset of ids of all things to be indexed. Defaults to:: cls.get_model().objects.order_by('id').values_list( 'id', flat=True) :returns: iterable of ids of objects to be indexed
def add_attribute(self, name, value):
    """Add the given attribute to the node.

    Usage::

        >>> node_a = AbstractNode()
        >>> node_a.add_attribute("attributeA", Attribute())
        True
        >>> node_a.list_attributes()
        [u'attributeA']

    :param name: Attribute name.
    :type name: unicode
    :param value: Attribute value.
    :type value: Attribute
    :return: Method success.
    :rtype: bool
    :raises NodeAttributeTypeError: if ``value`` is not an Attribute.
    :raises NodeAttributeExistsError: if ``name`` is already present.
    """
    if not issubclass(value.__class__, Attribute):
        # Bug fix: Attribute.__class__.__name__ is the METAclass name
        # (e.g. 'type'), not 'Attribute'; use Attribute.__name__ so the
        # error message names the expected class.
        raise foundations.exceptions.NodeAttributeTypeError(
            "Node attribute value must be a '{0}' class instance!".format(Attribute.__name__))
    if self.attribute_exists(name):
        raise foundations.exceptions.NodeAttributeExistsError(
            "Node attribute '{0}' already exists!".format(name))
    self[name] = value
    return True
Adds given attribute to the node. Usage:: >>> node_a = AbstractNode() >>> node_a.add_attribute("attributeA", Attribute()) True >>> node_a.list_attributes() [u'attributeA'] :param name: Attribute name. :type name: unicode :param value: Attribute value. :type value: Attribute :return: Method success. :rtype: bool
def get_active_channels_by_year_quarter(
        self, channel_type, year, quarter, expires=None):
    """Search for all active channels for a given year and quarter.

    When ``expires`` is not given, it defaults to the start (midnight)
    of the current UTC day.
    """
    cutoff = expires
    if cutoff is None:
        cutoff = datetime.combine(datetime.utcnow().date(), time.min)
    query = {
        'type': channel_type,
        'tag_year': year,
        'tag_quarter': quarter,
        'expires_after': cutoff.isoformat(),
    }
    return self.search_channels(**query)
Search for all active channels by year and quarter
def shutdown(self):
    """Shut down the daemon process and its helpers (idempotent)."""
    if not self._exited:
        # Guard so a second call is a no-op.
        self._exited = True
        # Kill the task runner first so no further work is produced
        # while the command server is being torn down.
        if self._task_runner.is_alive():
            self._task_runner.terminate()
        if self._command_server.is_alive():
            # NOTE(review): is_alive() can remain True right after
            # terminate(); join() waits for the runner to actually die
            # before closing the shared pipe.
            if self._task_runner.is_alive():
                self._task_runner.join()
            _shutdown_pipe(self._pipe)
            self._task.stop()
Shuts down the daemon process.
def iterable(self, iterable_name, *, collection, attribute, word,
             func=None, operation=None):
    """Apply an OData lambda filter (e.g. ``any``/``all``) on a collection.

    For example::

        q.iterable('any', collection='email_addresses',
                   attribute='address', operation='eq',
                   word='george@best.com')

    renders the filter ``emailAddresses/any(a:a/address eq 'george@best.com')``.

    :param str iterable_name: the OData name of the iterable.
    :param str collection: the collection to apply the keyword on.
    :param str attribute: the attribute of the collection to check.
    :param str word: the word to check.
    :param str func: logical function applied to the attribute inside
        the collection (mutually exclusive with ``operation``).
    :param str operation: logical operation applied to the attribute
        inside the collection (mutually exclusive with ``func``).
    :rtype: Query
    :raises ValueError: if neither or both of ``func``/``operation`` given.
    """
    if func is None and operation is None:
        raise ValueError('Provide a function or an operation to apply')
    elif func is not None and operation is not None:
        raise ValueError(
            'Provide either a function or an operation but not both')
    # Temporarily point the builder at the lambda variable so the
    # generated fragment is attributed to the iterable; restored below.
    current_att = self._attribute
    self._attribute = iterable_name
    word = self._parse_filter_word(word)
    collection = self._get_mapping(collection)
    attribute = self._get_mapping(attribute)
    if func is not None:
        sentence = self._prepare_function(func, attribute, word)
    else:
        sentence = self._prepare_sentence(attribute, operation, word)
    filter_str, attrs = sentence
    # 'a' is the OData range variable: collection/any(a:a/attr op word)
    filter_data = '{}/{}(a:a/{})'.format(collection, iterable_name, filter_str), attrs
    self._add_filter(*filter_data)
    self._attribute = current_att
    return self
Performs a filter with the OData 'iterable_name' keyword on the collection For example: q.iterable('any', collection='email_addresses', attribute='address', operation='eq', word='george@best.com') will transform to a filter such as: emailAddresses/any(a:a/address eq 'george@best.com') :param str iterable_name: the OData name of the iterable :param str collection: the collection to apply the any keyword on :param str attribute: the attribute of the collection to check :param str word: the word to check :param str func: the logical function to apply to the attribute inside the collection :param str operation: the logical operation to apply to the attribute inside the collection :rtype: Query
def get_assigned_services_uids(self):
    """Get the UIDs of the services currently assigned to this
    Worksheet, with duplicates removed (order unspecified).
    """
    unique_uids = {api.get_uid(service)
                   for service in self.get_assigned_services()}
    return list(unique_uids)
Get the current assigned services UIDs of this Worksheet
def write (self, s, **args):
    """Write string ``s`` to the output descriptor.

    Opens the configured file output lazily on first write.  If the
    descriptor turns out to be broken, output for this logger is
    disabled and subsequent writes go to a no-op dummy sink.
    """
    if self.filename is not None:
        # Deferred open: only create the target file once output exists.
        self.start_fileoutput()
    if self.fd is None:
        log.warn(LOG_CHECK, "writing to unitialized or closed file")
    else:
        try:
            self.fd.write(s, **args)
        except IOError:
            msg = sys.exc_info()[1]
            log.warn(LOG_CHECK, "Could not write to output file: %s\n"
                     "Disabling log output of %s", msg, self)
            # Swap in a no-op sink so later writes are harmless.
            self.close_fileoutput()
            self.fd = dummy.Dummy()
            self.is_active = False
Write string to output descriptor. Strips control characters from string before writing.
def get_unused_node_id(graph, initial_guess='unknown', _format='{}<%d>'):
    """Find an unused node id in ``graph``.

    :param graph: a directed graph (networkx DiGraph).
    :param initial_guess: initial node id guess.
    :param _format: format used to derive new candidate ids when the
        guess is already taken.
    :return: an unused node id.
    :rtype: str
    """
    template = _format.format(initial_guess)
    next_index = counter()
    candidate = initial_guess
    # Keep appending increasing counter values until the id is free.
    while graph.has_node(candidate):
        candidate = template % next_index()
    return candidate
Finds an unused node id in `graph`. :param graph: A directed graph. :type graph: networkx.classes.digraph.DiGraph :param initial_guess: Initial node id guess. :type initial_guess: str, optional :param _format: Format to generate the new node id if the given is already used. :type _format: str, optional :return: An unused node id. :rtype: str
def _apply_backwards_compatibility(df): df.row_count = types.MethodType(lambda self: len(self.index), df) df.col_count = types.MethodType(lambda self: len(self.columns), df) df.dataframe = df
Attach properties to the Dataframe to make it backwards compatible with older versions of this library :param df: The dataframe to be modified
def valid_env_vars() -> bool:
    """Validate that all required environment variables exist.

    Logs the error and exits the process (status 1) when one is missing.

    :returns: True if all required env vars exist.

    .. versionadded:: 0.0.12
    """
    for name in _REQUIRED_ENV_VARS:
        try:
            _check_env_var(name)
        except KeyError as err:
            LOG.error(err)
            sys.exit(1)
    return True
Validate that required env vars exist. :returns: True if required env vars exist. .. versionadded:: 0.0.12
def parse_torrent_file(torrent):
    """Parse a local or remote torrent file.

    :param torrent: an http(s)/ftp URL or a local file path.
    :return: the parsed torrent data, or None when ``torrent`` is
        neither a matching URL nor an existing file.
    """
    # Bug fix: the original pattern r'^(http?s|ftp)' matched 'https'
    # and 'htts' but NOT plain 'http' (the '?' applied to the 'p').
    # 'https?' matches both http and https schemes.
    link_re = re.compile(r'^(https?|ftp)')
    if link_re.match(torrent):
        response = requests.get(torrent, headers=HEADERS, timeout=20)
        data = parse_torrent_buffer(response.content)
    elif os.path.isfile(torrent):
        with open(torrent, 'rb') as f:
            data = parse_torrent_buffer(f.read())
    else:
        data = None
    return data
parse local or remote torrent file
def set_call_back(self, func):
    """Register ``func`` as the timer callback and start the timer.

    The callback is responsible for reading the serial input and for
    any further processing of the signal before updating the plot.
    """
    self.timer.add_callback(func)
    self.timer.start()
sets callback function for updating the plot. in the callback function implement the logic of reading of serial input also the further processing of the signal if necessary has to be done in this callbak function.
def qteRemoveKey(self, keysequence: QtmacsKeysequence):
    """Remove ``keysequence`` from this key map, pruning now-empty
    sub-maps along its prefix.

    |Args|

    * ``keysequence`` (**QtmacsKeysequence**): key sequence to remove
      from this key map.

    |Returns|

    **None**
    """
    keyMap = self
    # Keep a reference to the root so pruning can restart from the top.
    keyMapRef = keyMap
    keysequence = keysequence.toQtKeylist()
    # Walk down to the map holding the final key; abort silently if the
    # sequence is not registered.
    for key in keysequence[:-1]:
        if key not in keyMap:
            return
        keyMap = keyMap[key]
    if keysequence[-1] not in keyMap:
        return
    else:
        keyMap.pop(keysequence[-1])
    # Prune empty prefix maps bottom-up, re-walking from the root.
    keysequence = keysequence[:-1]
    while(len(keysequence)):
        keyMap = keyMapRef
        for key in keysequence[:-1]:
            keyMap = keyMap[key]
        # NOTE(review): after the walk ``key`` is the second-to-last
        # element, and ``keysequence`` is never shortened inside this
        # loop -- the pruning logic looks suspect (possible infinite
        # loop / wrong key tested).  Confirm against upstream Qtmacs
        # before relying on multi-level pruning.
        if len(keyMap[key]):
            return
        else:
            keyMap.pop(key)
Remove ``keysequence`` from this key map. |Args| * ``keysequence`` (**QtmacsKeysequence**): key sequence to remove from this key map. |Returns| **None** |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type.
def get_user_id_by_user(self, username):
    """Look up a user id by username via the pod Users API.

    :param username: the username to look up.
    :return: (status_code, response) tuple from the API call.
    """
    response, status_code = self.__pod__.Users.get_v2_user(
        sessionToken=self.__session__, username=username
    ).result()
    self.logger.debug('%s: %s' % (status_code, response))
    return status_code, response
get user id by username
def deleted_records(endpoint):
    """Build the rule that populates the ``deleted_records`` key.

    :param endpoint: endpoint name used when building record references.
    :return: a per-value converter turning the ``a`` subfield into a
        record reference (or None when the recid is missing/invalid).
    """
    @utils.for_each_value
    def _deleted_records(self, key, value):
        # Non-numeric / absent recids yield None and are skipped.
        deleted_recid = maybe_int(value.get('a'))
        if deleted_recid:
            return get_record_ref(deleted_recid, endpoint)

    return _deleted_records
Populate the ``deleted_records`` key.