code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _get_instance_path(self, name):
    """Return a path to the pickled data with key ``name``."""
    file_name = self.pattern.format(name=name)
    logger.debug(f'path {self.create_path}: {self.create_path.exists()}')
    # Make sure the target directory exists before handing out the path.
    self._create_path_dir()
    return Path(self.create_path, file_name)
def refreshUi(self):
    """Load the plugin information to the interface.

    Walks every child QWidget carrying a 'dataName' property and fills it
    with the corresponding value from this widget's data set.

    :return: ``False`` when no data set is available, ``True`` otherwise.
    """
    dataSet = self.dataSet()
    if not dataSet:
        return False
    for widget in self.findChildren(QWidget):
        # Only widgets tagged with a 'dataName' property participate.
        prop = unwrapVariant(widget.property('dataName'))
        if prop is None:
            continue
        prop_name = nativestring(prop)
        if prop_name in dataSet:
            value = dataSet.value(prop_name)
            projexui.setWidgetValue(widget, value)
    return True
def validate(self, obj):
    """Check that ``obj`` carries this api param.

    Walks ``self.path`` then indexes ``self.field``; a missing key raises
    the container's lookup error. Always ends by raising
    ``NotImplementedError`` since validation itself is not implemented yet.
    """
    target = obj
    for step in (self.path or ()):
        target = target[step]
    target = target[self.field]
    raise NotImplementedError('Validation is not implemented yet')
def standardizeMapName(mapName):
    """Pretty-fy the name for pysc2 map lookup."""
    base = os.path.basename(mapName)
    # Strip extension and any parenthesized suffix.
    base = base.split(".")[0]
    base = base.split("(")[0]
    # Drop trailing edition markers (e.g. "LE"), dashes and spaces.
    base = re.sub("[LT]E+$", "", base)
    base = re.sub("-", "", base)
    base = re.sub(' ', '', base, flags=re.UNICODE)
    # Translate known foreign names back to the canonical lookup name.
    if base in c.mapNameTranslations:
        return c.mapNameTranslations[base]
    return base
def arch_size(self):
    """Return the architecture size in bits.

    :raises BfdException: if the BFD pointer is not initialized or the
        underlying library call fails.
    """
    if not self._ptr:
        raise BfdException("BFD not initialized")
    try:
        return _bfd.get_arch_size(self._ptr)
    # was `except Exception, err` -- Python-2-only syntax, a SyntaxError on py3
    except Exception as err:
        # typo fixed: "architeure" -> "architecture"; chain the cause for debugging
        raise BfdException("Unable to determine architecture size.") from err
def copy_abs(self):
    """Return a copy of self with the sign bit unset.

    Unlike abs(self), this does not make use of the context: the result
    has the same precision as the original.
    """
    # Allocate a raw BigFloat and initialize it at self's exact precision.
    result = mpfr.Mpfr_t.__new__(BigFloat)
    mpfr.mpfr_init2(result, self.precision)
    # Copy the value with the sign forced positive; rounding mode is moot
    # since source and destination precisions are identical.
    mpfr.mpfr_setsign(result, self, False, ROUND_TIES_TO_EVEN)
    return result
def run(self):
    """Begin reading through audio files, saving false activations and
    retraining when necessary.
    """
    for fn in glob_all(self.args.random_data_folder, '*.wav'):
        # Skip files already processed in a previous session.
        if fn in self.trained_fns:
            print('Skipping ' + fn + '...')
            continue
        print('Starting file ' + fn + '...')
        self.train_on_audio(fn)
        print('\r100% ')
        # Persist progress after each file so an interrupted run can resume.
        self.trained_fns.append(fn)
        save_trained_fns(self.trained_fns, self.args.model)
def save_model(self, fname='model.js'):
    """Save the assembled Cytoscape JS network in a js file.

    Parameters
    ----------
    fname : Optional[str]
        The name of the file to save the Cytoscape JS network to.
        Default: model.js
    """
    exp_scale = json.dumps(self._exp_colorscale)
    mut_scale = json.dumps(self._mut_colorscale)
    elements = json.dumps({'edges': self._edges, 'nodes': self._nodes},
                          indent=1, sort_keys=True)
    script = ('var exp_colorscale = %s;\n' % exp_scale
              + 'var mut_colorscale = %s;\n' % mut_scale
              + 'var model_elements = %s;\n' % elements)
    with open(fname, 'wb') as fh:
        fh.write(script.encode('utf-8'))
def run_callback(self, callback, *args):
    """Queue a callback.

    The *callback* will be called with positional arguments *args* in the
    next iteration of the event loop, in the order callbacks were added,
    inside the Hub's fiber. Thread-safe.

    :raises RuntimeError: if the hub is closed.
    :raises TypeError: if *callback* is not callable.
    """
    if self._loop is None:
        raise RuntimeError('hub is closed')
    if not callable(callback):
        raise TypeError('"callback": expecting a callable')
    self._callbacks.append((callback, args))
    # Wake the loop so the callback runs promptly.
    self._interrupt_loop()
def cross_goal(state):
    """The goal function for cross solving search.

    ``state`` is a (centres, edges) pair; returns True when every edge has
    a "D" face matching the down centre and its other face matching the
    corresponding side centre.
    """
    centres, edges = state
    for edge in edges:
        if "D" not in edge.facings:
            return False
        if edge["D"] != centres["D"]["D"]:
            return False
        # k is the edge's remaining (non-"D") face label.
        k = "".join(edge.facings.keys()).replace("D", "")
        if edge[k] != centres[k][k]:
            return False
    return True
def get_translations(self, context_id):
    """Retrieve all translation entries for a tunnel context.

    :param int context_id: The id-value representing the context instance.
    :return list(dict): Translations associated with the given context
    """
    mask = ('[mask[addressTranslations[customerIpAddressRecord,'
            'internalIpAddressRecord]]]')
    context = self.get_tunnel_context(context_id, mask=mask)
    # Flatten the nested IP records into plain address fields.
    for entry in context.get('addressTranslations', []):
        remote = entry.get('customerIpAddressRecord', {})
        internal = entry.get('internalIpAddressRecord', {})
        entry['customerIpAddress'] = remote.get('ipAddress', '')
        entry['internalIpAddress'] = internal.get('ipAddress', '')
        entry.pop('customerIpAddressRecord', None)
        entry.pop('internalIpAddressRecord', None)
    return context['addressTranslations']
def set_git_user_email():
    """Set global user and email for git user if not already present on system."""
    # Read the currently configured name; empty output means unset.
    username = subprocess.run(shlex.split('git config user.name'),
                              stdout=subprocess.PIPE).stdout.strip().decode('utf-8')
    # Only override the Travis default identity, never a real user's.
    if not username or username == "Travis CI User":
        run(['git', 'config', '--global', 'user.name', "Doctr (Travis CI)"])
    else:
        print("Not setting git user name, as it's already set to %r" % username)
    email = subprocess.run(shlex.split('git config user.email'),
                           stdout=subprocess.PIPE).stdout.strip().decode('utf-8')
    if not email or email == "travis@example.org":
        run(['git', 'config', '--global', 'user.email', 'drdoctr@users.noreply.github.com'])
    else:
        print("Not setting git user email, as it's already set to %r" % email)
def _repr_latex_(self):
    """Render the ODEProblem for IPython notebooks.

    NOTE(review): despite the ``_repr_latex_`` name, most of the output is
    HTML (<h1>, <p>, <ul>) with a trailing LaTeX align block -- confirm
    whether this was intended to be ``_repr_html_``.
    """
    lines = []
    lines.append(r"<h1>{0}</h1>".format(self.__class__.__name__))
    lines.append("<p>Method: <code>{0!r}</code></p>".format(self.method))
    lines.append("<p>Parameters: <code>{0!r}</code></p>".format(self.parameters))
    lines.append("<p>Terms:</p>")
    lines.append("<ul>")
    lines.extend(['<li><code>{0!r}</code></li>'.format(lhs)
                  for lhs in self.left_hand_side_descriptors])
    lines.append("</ul>")
    lines.append('<hr />')
    # One \dot{lhs} = rhs equation per ODE term.
    lines.append(r"\begin{align*}")
    for lhs, rhs in zip(self.left_hand_side_descriptors, self.right_hand_side):
        lines.append(r"\dot{{{0}}} &= {1} \\".format(sympy.latex(lhs.symbol),
                                                     sympy.latex(rhs)))
    lines.append(r"\end{align*}")
    return "\n".join(lines)
def deprecated(replacement=None, version=None):
    """A decorator which can be used to mark functions as deprecated.

    replacement is a callable that will be called with the same args
    as the decorated function.

    >>> import pytest
    >>> @deprecated()
    ... def foo1(x):
    ...     return x
    ...
    >>> pytest.warns(DeprecationWarning, foo1, 1)
    1
    >>> def newfun(x):
    ...     return 0
    ...
    >>> @deprecated(newfun, '1.1')
    ... def foo2(x):
    ...     return x
    ...
    >>> pytest.warns(DeprecationWarning, foo2, 1)
    0
    >>>
    """
    import functools

    def outer(oldfun):
        # wraps() preserves the decorated function's name/docstring.
        @functools.wraps(oldfun)
        def inner(*args, **kwargs):
            msg = "%s is deprecated" % oldfun.__name__
            if version is not None:
                # was: msg += "will be removed in version %s;" -- no separator
                # before "will" and a stray ";" colliding with the next clause.
                msg += "; will be removed in version %s" % version
            if replacement is not None:
                # Show the callable's name, not its repr.
                msg += "; use %s instead" % getattr(replacement, '__name__',
                                                   replacement)
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            if callable(replacement):
                return replacement(*args, **kwargs)
            return oldfun(*args, **kwargs)
        return inner
    return outer
def num_available(self, work_spec_name):
    """Get the number of available work units for some work spec.

    These are work units that could be returned by :meth:`get_work`:
    they are not complete, not currently executing, and not blocked
    on some other work unit.
    """
    # Units whose priority is <= now are eligible; future-priority units
    # are excluded by priority_max.
    return self.registry.len(WORK_UNITS_ + work_spec_name,
                             priority_max=time.time())
def delete_archive_file(self):
    """Delete the directory containing the constructed archive."""
    logger.debug("Deleting %s", self.archive_tmp_dir)
    # ignore_errors=True: best-effort cleanup, never raises.
    shutil.rmtree(self.archive_tmp_dir, True)
def put_value(self, value, timeout=None):
    """Put a value to the Attribute and wait for completion."""
    # The attribute's value lives at <data.path>/value in the context tree.
    self._context.put(self._data.path + ["value"], value, timeout=timeout)
def add_and_shuffle(self, peer):
    """Push a new peer into the heap and shuffle the heap."""
    self.push_peer(peer)
    # Swap the new peer into a random slot to randomize ordering.
    r = random.randint(0, self.size() - 1)
    self.swap_order(peer.index, r)
def _get_int64(data, position, dummy0, dummy1, dummy2):
    """Decode a BSON int64 to bson.int64.Int64.

    Returns the decoded value and the offset just past the 8-byte field.
    The dummy parameters keep the signature uniform with other decoders.
    """
    end = position + 8
    # _UNPACK_LONG is a precompiled struct unpacker; [0] extracts the value.
    return Int64(_UNPACK_LONG(data[position:end])[0]), end
def request(self, method, path, options=None, payload=None, heartbeater=None,
            retry_count=0):
    """Make a request to the Service Registry API.

    @param method: HTTP method ('POST', 'GET', etc.).
    @type method: C{str}
    @param path: Path to be appended to base URL ('/sessions', etc.).
    @type path: C{str}
    @param options: Options to be encoded as query parameters in the URL.
    @type options: C{dict}
    @param payload: Optional body
    @type payload: C{dict}
    @param heartbeater: Optional heartbeater passed in when creating a session.
    @type heartbeater: L{HeartBeater}
    """
    def _request(authHeaders, options, payload, heartbeater, retry_count):
        tenantId = authHeaders['X-Tenant-Id']
        requestUrl = self.baseUrl + tenantId + path
        if options:
            requestUrl += '?' + urlencode(options)
        # NOTE(review): this rebinds ``payload`` to the wrapped producer, so
        # cbRequest below receives the StringProducer, not the original dict
        # -- confirm that is intentional.
        payload = StringProducer(json.dumps(payload)) if payload else None
        d = self.agent.request(method=method, uri=requestUrl, headers=None,
                               bodyProducer=payload)
        d.addCallback(self.cbRequest, method, path, options, payload,
                      heartbeater, retry_count)
        return d
    # Authenticate first, then fire the actual request with the headers.
    d = self.agent.getAuthHeaders()
    d.addCallback(_request, options, payload, heartbeater, retry_count)
    return d
def delete_messages(
    self,
    chat_id: Union[int, str],
    message_ids: Iterable[int],
    revoke: bool = True
) -> bool:
    """Use this method to delete messages, including service messages.

    Args:
        chat_id (``int`` | ``str``):
            Unique identifier (int) or username (str) of the target chat.
        message_ids (``iterable``):
            A list of Message identifiers to delete or a single message id.
        revoke (``bool``, *optional*):
            Deletes messages on both parts; channels and supergroups always
            revoke. Defaults to True.

    Returns:
        True on success, False otherwise.

    Raises:
        :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
    """
    peer = self.resolve_peer(chat_id)
    # Accept a bare int as well as any iterable of ids.
    message_ids = list(message_ids) if not isinstance(message_ids, int) else [message_ids]
    if isinstance(peer, types.InputPeerChannel):
        # Channel deletions have their own RPC and are always revoked.
        r = self.send(
            functions.channels.DeleteMessages(
                channel=peer,
                id=message_ids
            )
        )
    else:
        # revoke=None (not False) omits the flag from the RPC entirely.
        r = self.send(
            functions.messages.DeleteMessages(
                id=message_ids,
                revoke=revoke or None
            )
        )
    # pts_count == 0 means nothing was actually deleted.
    return bool(r.pts_count)
def _get_auth(username, password): if username and password: return requests.auth.HTTPBasicAuth(username, password) else: return None
Returns the HTTP auth header
def hmget(self, key, *fields):
    """Return the values associated with the specified `fields` in a hash.

    For every ``field`` that does not exist in the hash, :data:`None` is
    returned; a non-existing key behaves as an empty hash.

    .. note:: *Time complexity*: ``O(N)`` where ``N`` is the number of
       fields being requested.

    :param key: The key of the hash
    :type key: :class:`str`, :class:`bytes`
    :param fields: iterable of field names to retrieve
    :returns: a :class:`dict` of field name to value mappings for each of
        the requested fields
    :rtype: dict
    """
    def format_response(val_array):
        # Redis returns values positionally; pair them back with field names.
        return dict(zip(fields, val_array))

    command = [b'HMGET', key]
    command.extend(fields)
    return self._execute(command, format_callback=format_response)
def set_initial_representations(self):
    """General settings for PyMOL."""
    self.standard_settings()
    cmd.set('dash_gap', 0)
    cmd.set('ray_shadow', 0)
    cmd.set('cartoon_color', 'mylightblue')
    # Widen the clipping planes so nothing in the scene is cut off.
    cmd.clip('far', -1000)
    cmd.clip('near', 1000)
def get_face_normals(self, indexed=None):
    """Get face normals.

    Parameters
    ----------
    indexed : str | None
        If None, return an array (Nf, 3) of normal vectors for each face.
        If 'faces', then instead return an indexed array (Nf, 3, 3)
        (the same array with each vector copied three times).

    Returns
    -------
    normals : ndarray
        The normals.
    """
    # Lazily compute and cache the per-face normals.
    if self._face_normals is None:
        v = self.get_vertices(indexed='faces')
        # Cross product of two face edges gives the (unnormalized) normal.
        self._face_normals = np.cross(v[:, 1] - v[:, 0], v[:, 2] - v[:, 0])
    if indexed is None:
        return self._face_normals
    elif indexed == 'faces':
        # Lazily build and cache the vertex-expanded copy.
        if self._face_normals_indexed_by_faces is None:
            norms = np.empty((self._face_normals.shape[0], 3, 3),
                             dtype=np.float32)
            norms[:] = self._face_normals[:, np.newaxis, :]
            self._face_normals_indexed_by_faces = norms
        return self._face_normals_indexed_by_faces
    else:
        raise Exception("Invalid indexing mode. Accepts: None, 'faces'")
def get_queryset(self):
    """Get the queryset for the action.

    If the action is a read action ('list' or 'retrieve'), return a
    CachedQueryset wrapping the normal queryset; otherwise return the
    plain Django queryset.
    """
    queryset = super(CachedViewMixin, self).get_queryset()
    if self.action in ('list', 'retrieve'):
        return CachedQueryset(self.get_queryset_cache(), queryset=queryset)
    else:
        return queryset
def _string_to_record_type(string):
    """Map a DNS record type string to a libcloud RecordType ENUM.

    :param string: A record type, e.g. A, TXT, NS
    :type string: ``str``

    :rtype: :class:`RecordType`
    """
    # Enum members are named after the upper-cased record type.
    return getattr(RecordType, string.upper())
def get_callback_url(self, provider):
    """Return the callback url for this provider."""
    # Admin URL names follow the "<app>_<model>_<view>" convention.
    info = self.model._meta.app_label, self.model._meta.model_name
    return reverse('admin:%s_%s_callback' % info,
                   kwargs={'provider': provider.id})
def get(self):
    """Get the highest priority Processing Block from the queue."""
    with self._mutex:
        # NOTE(review): list.pop() removes the LAST element; this is the
        # highest-priority item only if the queue is kept sorted ascending
        # by priority -- confirm against the insertion code.
        entry = self._queue.pop()
        # entry[2] is the block id; drop its bookkeeping entry too.
        del self._block_map[entry[2]]
        return entry[2]
def delete_translations(self, language=None):
    """Delete related translations (all languages when ``language`` is None)."""
    # Local import avoids a circular dependency at module load time.
    from .models import Translation
    return Translation.objects.delete_translations(obj=self, language=language)
def remove(self, key):
    """Remove key from the namespace.

    It is fine to remove a key multiple times.
    """
    # Keys are stored JSON-encoded, so encode before matching.
    encodedKey = json.dumps(key)
    # Key value is passed via args (parameterized), not interpolated.
    sql = 'DELETE FROM ' + self.table + ' WHERE name = %s'
    with self.connect() as conn:
        with doTransaction(conn):
            return executeSQL(conn, sql, args=[encodedKey])
def call_once(func):
    """Decorate a function to only allow it to be called once.

    Note that it doesn't make sense to only call a function once if it
    takes arguments (use @functools.lru_cache for that sort of thing),
    so this only works on callables that take no args.
    """
    # inspect.getargspec was removed in Python 3.11; getfullargspec is the
    # supported replacement and also rejects keyword-only arguments.
    argspec = inspect.getfullargspec(func)
    if argspec.args or argspec.varargs or argspec.varkw or argspec.kwonlyargs:
        raise ValueError('Can only decorate functions with no args', func,
                         argspec)

    @functools.wraps(func)
    def _wrapper():
        if not _wrapper.HasRun():
            _wrapper.MarkAsRun()
            _wrapper.return_value = func()
        return _wrapper.return_value

    _wrapper.has_run = False
    _wrapper.HasRun = lambda: _wrapper.has_run
    _wrapper.MarkAsRun = lambda: setattr(_wrapper, 'has_run', True)
    return _wrapper
def translate(self):
    """Compile the function call.

    Returns (expressions, variable names, function names) where the single
    expression is an AST call of this function with string-coerced args.
    """
    varnames = set()
    ident = self.ident
    funcnames = set([ident])
    arg_exprs = []
    for arg in self.args:
        subexprs, subvars, subfuncs = arg.translate()
        varnames.update(subvars)
        funcnames.update(subfuncs)
        # Build AST for: ''.join(map(str, [subexpr, ...]))
        # i.e. coerce every part to text and concatenate.
        arg_exprs.append(ex_call(
            ast.Attribute(ex_literal(u''), 'join', ast.Load()),
            [ex_call(
                'map',
                [
                    ex_rvalue(str.__name__),
                    ast.List(subexprs, ast.Load()),
                ]
            )],
        ))
    # The function itself is looked up under a prefixed runtime name.
    subexpr_call = ex_call(
        FUNCTION_PREFIX + ident,
        arg_exprs
    )
    return [subexpr_call], varnames, funcnames
def remove_data_flow(self, data_flow_id, destroy=True):
    """Removes a data flow from the container state.

    :param int data_flow_id: the id of the data_flow to remove
    :param bool destroy: NOTE(review): accepted but never used in this
        body -- confirm whether it should gate the removal.
    :raises exceptions.AttributeError: if the data_flow_id does not exist
    """
    if data_flow_id not in self._data_flows:
        raise AttributeError("The data_flow_id %s does not exist"
                             % str(data_flow_id))
    # Detach from the parent before dropping the reference.
    self._data_flows[data_flow_id].parent = None
    return self._data_flows.pop(data_flow_id)
def get_node_attribute(self, node, attribute_name):
    """Return a copy of the named attribute of ``node``.

    :param node: reference to the node to retrieve the attribute of.
    :param attribute_name: name of the attribute to retrieve.
    :returns: attribute value of the attribute_name key for the
        specified node.
    :raises: ValueError -- No such node exists.
    :raises: ValueError -- No such attribute exists.
    """
    if not self.has_node(node):
        raise ValueError("No such node exists.")
    attributes = self._node_attributes[node]
    if attribute_name not in attributes:
        raise ValueError("No such attribute exists.")
    # Return a shallow copy so callers can't mutate stored state.
    return copy.copy(attributes[attribute_name])
def parse(self, uri=None, fh=None, str_data=None, **kwargs):
    """Parse a single XML document for this list.

    Accepts either a uri (uri or default if parameter not specified),
    or a filehandle (fh) or a string (str_data). Note that this method
    does not handle the case of a sitemapindex+sitemaps.

    LEGACY SUPPORT - the parameter str may be used in place of str_data
    but is deprecated and will be removed in a later version.
    """
    # Resolve the input source in priority order: uri, str_data, legacy str.
    if (uri is not None):
        try:
            fh = URLopener().open(uri)
        except IOError as e:
            raise Exception(
                "Failed to load sitemap/sitemapindex from %s (%s)"
                % (uri, str(e)))
    elif (str_data is not None):
        fh = io.StringIO(str_data)
    elif ('str' in kwargs):
        self.logger.warn(
            "Legacy parse(str=...), use parse(str_data=...) instead")
        fh = io.StringIO(kwargs['str'])
    if (fh is None):
        raise Exception("Nothing to parse")
    s = self.new_sitemap()
    # Parse into this list (resources=self); sitemapindex handling is off.
    s.parse_xml(
        fh=fh,
        resources=self,
        capability=self.capability_name,
        sitemapindex=False)
    # Remember whether the parsed document was actually an index.
    self.parsed_index = s.parsed_index
def fit(sim_mat, D_len, cidx):
    """Cluster by maximizing energy between clusters.

    The (sparse, dict-of-dict) distance matrix mostly contains 0 entries,
    which are overlooked in favor of maximal distances; the algorithm does
    not try to retain k clusters.

    sim_mat: dict-of-dict sparse symmetric distance matrix
    D_len: int -- number of rows in the full matrix
    cidx: initial cluster-representative indices
    """
    min_energy = np.inf
    # Fixed number of refinement passes.
    for j in range(3):
        # Assign each present row to its nearest current representative.
        inds = [np.argmin([sim_mat[idy].get(idx, 0) for idx in cidx])
                for idy in range(D_len) if idy in sim_mat]
        cidx = []
        energy = 0
        for i in np.unique(inds):
            # Members of cluster i (numpy scalar == list broadcasts to a mask).
            indsi = np.where(inds == i)[0]
            minind, min_value = 0, 0
            # Pick the member with the lowest total distance to the cluster
            # as its new representative (the medoid).
            for index, idy in enumerate(indsi):
                if idy in sim_mat:
                    value = 0
                    for idx in indsi:
                        value += sim_mat[idy].get(idx, 0)
                    if value < min_value:
                        minind, min_value = index, value
            energy += min_value
            cidx.append(indsi[minind])
        # Keep the assignment/representatives of the best pass seen.
        if energy < min_energy:
            min_energy, inds_min, cidx_min = energy, inds, cidx
    return inds_min, cidx_min
def normal_from_points(a, b, c):
    """Computes a unit normal vector for the plane through three points."""
    ax, ay, az = a
    bx, by, bz = b
    cx, cy, cz = c
    # Two edges sharing vertex a span the plane.
    edge1 = (bx - ax, by - ay, bz - az)
    edge2 = (cx - ax, cy - ay, cz - az)
    nx, ny, nz = cross(edge1, edge2)
    # Normalize to unit length.
    length = (nx * nx + ny * ny + nz * nz) ** 0.5
    return (nx / length, ny / length, nz / length)
def zero_downtime_index(index_name, index_config):
    """Context manager to create a new index based on a given alias, allow
    the caller to index it, and then point the alias to the new index.

    Args:
        index_name (str) Name of an alias that should point to the new index
        index_config (dict) Configuration for the new index
    Yields:
        (name) The full name of the new index
    """
    client = indices_client()
    # Unique temp name so concurrent builds can't collide.
    temporary_name = index_name + '_' + str(uuid.uuid4())
    logging.info('creating index with config %s', index_config)
    create_index(temporary_name, index_config, client)
    try:
        yield temporary_name
        # Only swap the alias once the caller finished indexing cleanly.
        atomic_swap(index_name, temporary_name, client)
    except Exception:
        # NOTE(review): the error is logged and the temp index deleted, but
        # the exception is NOT re-raised -- callers never see the failure.
        # Confirm this swallow is intentional.
        logging.error(
            'deleting temporary index %s due to error:',
            temporary_name,
            exc_info=True
        )
        client.delete(index=temporary_name)
def handle_line(self, line):
    """Read one line.

    Section headers open a new block; every other line is inserted into
    the current block.
    """
    if line.kind == ConfigLine.KIND_HEADER:
        self.enter_block(line.header)
    else:
        self.insert_line(line)
def one_symbol_ops_str(self) -> str:
    """Regex-escaped string with all one-symbol operators."""
    single_char_ops = [op for op in self.ops if len(op) == 1]
    return re.escape(''.join(single_char_ops))
def individual_weights(self):
    """Read individual weights from the load cells in grams.

    Returns
    -------
    weight : float
        The sensor weight in grams.
    """
    weights = self._raw_weights()
    # No samples yet: report zero per cell.
    if weights.shape[1] == 0:
        return np.zeros(weights.shape[0])
    # Fewer samples than filter taps: fall back to a plain mean.
    elif weights.shape[1] < self._ntaps:
        return np.mean(weights, axis=1)
    # Enough samples: apply the FIR filter coefficients.
    else:
        return weights.dot(self._filter_coeffs)
def set(self, source_id=None, profile_id=None, filter_id=None, stage=None,
        profile_reference=None, filter_reference=None):
    """Edit the profile stage given a filter.

    Args:
        profile_id:             <string>
                                profile id
        body params:
        source_id:              <string>
                                source id associated to the profile
        filter_id:              <string>
                                filter id
        stage:                  <string>
                                profiles' stage associated to the filter
                                (null for all, NEW, YES, LATER or NO).

    Returns
        Response that contains code 201 if successful
        Other status codes otherwise.
    """
    data = {}
    # Every field is validated before being sent; optional ids/references
    # are only included when provided.
    data["source_id"] = _validate_source_id(source_id)
    if profile_id:
        data["profile_id"] = _validate_profile_id(profile_id)
    if filter_id:
        data["filter_id"] = _validate_filter_id(filter_id)
    if profile_reference:
        data["profile_reference"] = _validate_profile_reference(profile_reference)
    if filter_reference:
        data["filter_reference"] = _validate_filter_reference(filter_reference)
    data["stage"] = _validate_stage(stage)
    response = self.client.patch('profile/stage', data=data)
    return response.json()
def search(self, buf):
    """Search the provided buffer for matching text.

    If the match is found, returns a :class:`SequenceMatch` object,
    otherwise returns ``None``.

    :param buf: Buffer to search for a match.
    :return: :class:`SequenceMatch` if matched, None if no match was found.
    """
    self._check_type(buf)
    # Normalize to the configured unicode form so visually-equal
    # sequences compare equal.
    normalized = unicodedata.normalize(self.FORM, buf)
    idx = normalized.find(self._text)
    if idx < 0:
        return None
    start = idx
    end = idx + len(self._text)
    # Offsets refer to the NORMALIZED buffer, not the raw input.
    return SequenceMatch(self, normalized[start:end], start, end)
def componentsintobranch(idf, branch, listofcomponents, fluid=None):
    """Insert a list of components into a branch.

    fluid is only needed if there are air and water nodes in same object;
    fluid is Air or Water or ''. If the fluid is Steam, use Water.
    """
    if fluid is None:
        fluid = ''
    componentlist = [item[0] for item in listofcomponents]
    thebranchname = branch.Name
    # Clear the branch's extensible fields, then find where they restart.
    thebranch = idf.removeextensibles('BRANCH', thebranchname)
    e_index = idf.getextensibleindex('BRANCH', thebranchname)
    theobj = thebranch.obj
    modeleditor.extendlist(theobj, e_index)
    # Each component contributes: key, name, inlet node, outlet node,
    # plus a trailing empty branch-control field.
    for comp, compnode in listofcomponents:
        theobj.append(comp.key)
        theobj.append(comp.Name)
        inletnodename = getnodefieldname(comp, "Inlet_Node_Name",
                                         fluid=fluid, startswith=compnode)
        theobj.append(comp[inletnodename])
        outletnodename = getnodefieldname(comp, "Outlet_Node_Name",
                                          fluid=fluid, startswith=compnode)
        theobj.append(comp[outletnodename])
        theobj.append('')
    return thebranch
def _get_codes_for_values(values, categories):
    """Utility routine to turn values into codes given the specified
    categories.
    """
    from pandas.core.algorithms import _get_data_algo, _hashtables
    dtype_equal = is_dtype_equal(values.dtype, categories.dtype)

    # Bring both sides to a common representation the hashtable can handle.
    if dtype_equal:
        # To prevent erroneous dtype coercion in _get_data_algo, retrieve
        # the underlying numpy array. gh-22702
        values = getattr(values, '_ndarray_values', values)
        categories = getattr(categories, '_ndarray_values', categories)
    elif (is_extension_array_dtype(categories.dtype) and
          is_object_dtype(values)):
        # Support inferring the correct extension dtype from an array of
        # scalar objects; fall back to object on failure.
        try:
            values = (
                categories.dtype.construct_array_type()._from_sequence(values)
            )
        except Exception:
            # but that may fail for any reason, so fall back to object
            values = ensure_object(values)
            categories = ensure_object(categories)
    else:
        values = ensure_object(values)
        categories = ensure_object(categories)

    # Hash the categories, then look each value up to get its code
    # (-1 for values not among the categories).
    (hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
    (_, _), cats = _get_data_algo(categories, _hashtables)
    t = hash_klass(len(cats))
    t.map_locations(cats)
    return coerce_indexer_dtype(t.lookup(vals), cats)
def tasks(self, pattern=None, negate=False, state=None, limit=None,
          reverse=True, params=None, success=False, error=True):
    """Filter stored tasks and display their current statuses.

    Note that the tasks are fetched from the server's LRU event heap, so
    ``limit`` refers to max events searched, not max tasks, and the total
    fetched may differ from the server ``max_tasks`` setting.

    Args:
        pattern (Optional[str]): regex to filter task names/ids
        negate (bool): if True, finds tasks that do not match criteria
        state (Optional[str]): a celery task state to filter
        limit (int): max events to fetch; None or 0 fetches all
        reverse (bool): if True (default), shows the most recent first
        params (Optional[bool]): True/False/None controls arg display
        success (bool): if True shows successful tasks' results
        error (bool): if True shows failed/retried tasks' tracebacks
    """
    # '.' matches everything when no pattern/state is given.
    request = clearly_pb2.FilterTasksRequest(
        tasks_filter=clearly_pb2.PatternFilter(pattern=pattern or '.',
                                               negate=negate),
        state_pattern=state or '.', limit=limit, reverse=reverse
    )
    # about_time wraps the stream to report fetch statistics via callback.
    for task in about_time(ClearlyClient._fetched_callback,
                           self._stub.filter_tasks(request)):
        ClearlyClient._display_task(task, params, success, error)
def underscore_to_camelcase(value, first_upper=True): value = str(value) camelized = "".join(x.title() if x else '_' for x in value.split("_")) if not first_upper: camelized = camelized[0].lower() + camelized[1:] return camelized
Transform string from underscore_string to camelCase. :param value: string with underscores :param first_upper: the result will have its first character in upper case :type value: str :return: string in CamelCase or camelCase according to the first_upper :rtype: str :Example: >>> underscore_to_camelcase('camel_case') 'CamelCase' >>> underscore_to_camelcase('camel_case', False) 'camelCase'
def slackpkg_update(self):
    """Replace the slackpkg ChangeLog.txt file with a fresh copy from the
    official Slackware mirrors after a distribution update, keeping the
    previous copy as ChangeLog.txt.old.
    """
    NEW_ChangeLog_txt = URL(mirrors("ChangeLog.txt", "")).reading()
    # Rotate: drop the old backup, back up the current file, then rewrite.
    if os.path.isfile(self.meta.slackpkg_lib_path + "ChangeLog.txt.old"):
        os.remove(self.meta.slackpkg_lib_path + "ChangeLog.txt.old")
    if os.path.isfile(self.meta.slackpkg_lib_path + "ChangeLog.txt"):
        shutil.copy2(self.meta.slackpkg_lib_path + "ChangeLog.txt",
                     self.meta.slackpkg_lib_path + "ChangeLog.txt.old")
        os.remove(self.meta.slackpkg_lib_path + "ChangeLog.txt")
    # `with` closes the file; the previous explicit log.close() inside the
    # with-block was redundant and has been removed.
    with open(self.meta.slackpkg_lib_path + "ChangeLog.txt", "w") as log:
        log.write(NEW_ChangeLog_txt)
def get_projection(self, axis):
    """Return the projection of this vector onto the given axis.

    The axis does not need to be normalized.
    """
    # Scalar projection factor: (axis . self) / |axis|^2
    scale = axis.dot(self) / axis.dot(axis)
    return axis * scale
def reboot(self, comment=None):
    """Send reboot command to this node.

    :param str comment: comment to audit
    :raises NodeCommandFailed: reboot failed with reason
    :return: None
    """
    self.make_request(
        NodeCommandFailed,
        method='update',
        resource='reboot',
        params={'comment': comment})
def remove_role(role):
    """Remove an action for a role.

    Returns a processor that deletes the matching ActionRoles rows when
    invoked with (action, argument).
    """
    def processor(action, argument):
        # Bulk delete without loading rows into the session.
        ActionRoles.query_by_action(action, argument=argument).filter(
            ActionRoles.role_id == role.id
        ).delete(synchronize_session=False)
    return processor
def is_config(python2=None, python3=None, windows=None, linux=None, osx=None):
    """Check if a specific configuration of Python version and operating
    system matches the user's setup. Mostly used to display targeted
    error messages.

    python2 (bool): spaCy is executed with Python 2.x.
    python3 (bool): spaCy is executed with Python 3.x.
    windows (bool): spaCy is executed on Windows.
    linux (bool): spaCy is executed on Linux.
    osx (bool): spaCy is executed on OS X or macOS.
    RETURNS (bool): Whether the configuration matches the user's platform.

    DOCS: https://spacy.io/api/top-level#compat.is_config
    """
    # None means "don't care"; otherwise the flag must equal the platform.
    checks = (
        (python2, is_python2),
        (python3, is_python3),
        (windows, is_windows),
        (linux, is_linux),
        (osx, is_osx),
    )
    return all(wanted in (None, actual) for wanted, actual in checks)
def list_panes(pymux, variables):
    """Display a list of all the panes."""
    w = pymux.arrangement.get_active_window()
    active_pane = w.active_pane
    result = []
    for i, p in enumerate(w.panes):
        process = p.process
        # One line per pane: index, size, history usage, active marker.
        result.append('%i: [%sx%s] [history %s/%s] %s' % (
            i, process.sx, process.sy,
            min(pymux.history_limit,
                process.screen.line_offset + process.sy),
            pymux.history_limit,
            ('(active)' if p == active_pane else '')))
    result = '\n'.join(sorted(result))
    pymux.get_client_state().layout_manager.display_popup('list-keys', result)
def __track_job(self):
    """Poll job status while active."""
    # Wait for the job to appear in the cluster before polling its status.
    while not self.__verify_job_has_started():
        time.sleep(self.__POLL_TIME)
        self.__logger.debug("Waiting for Kubernetes job " + self.uu_name
                            + " to start")
    self.__print_kubectl_hints()
    status = self.__get_job_status()
    while status == "RUNNING":
        self.__logger.debug("Kubernetes job " + self.uu_name + " is running")
        time.sleep(self.__POLL_TIME)
        status = self.__get_job_status()
    # NOTE(review): assert is stripped under `python -O`; failure handling
    # relies on it -- consider raising explicitly.
    assert status != "FAILED", "Kubernetes job " + self.uu_name + " failed"
    self.__logger.info("Kubernetes job " + self.uu_name + " succeeded")
    self.signal_complete()
def set(key, value, timeout=-1, adapter=MemoryAdapter):
    """Set cache by key; a timeout length must be set.

    Returns the stored value on success, or None when the adapter
    rejects the write.
    """
    # Values are pickled so arbitrary objects can be cached.
    if adapter(timeout=timeout).set(key, pickle.dumps(value)):
        return value
    else:
        return None
def multiThreadCommands(agent_list, command_list, num_jobs=None):
    """Execute the list of commands in command_list for each AgentType in
    agent_list using a multithreaded system. Each command should be a
    method of that AgentType subclass.

    Parameters
    ----------
    agent_list : [AgentType]
        A list of instances of AgentType on which the commands will be run.
    command_list : [string]
        A list of commands to run for each AgentType in agent_list.

    Returns
    -------
    None
    """
    # A single agent gains nothing from parallelism; run serially.
    if len(agent_list) == 1:
        multiThreadCommandsFake(agent_list, command_list)
        return None
    if num_jobs is None:
        num_jobs = min(len(agent_list), multiprocessing.cpu_count())
    # Run each agent's commands in a separate joblib worker.
    agent_list_out = Parallel(n_jobs=num_jobs)(
        delayed(runCommands)(*args)
        for args in zip(agent_list, len(agent_list) * [command_list]))
    # Copy results back in place so callers see the mutated agents.
    for j in range(len(agent_list)):
        agent_list[j] = agent_list_out[j]
def add_node_parents(root: ast.AST) -> None:
    """Attach a ``parent`` attribute to every descendant of ``root``.

    Code taken from https://stackoverflow.com/a/43311383/1286705
    """
    for parent in ast.walk(root):
        for child in ast.iter_child_nodes(parent):
            child.parent = parent
def _get_domain_text_of_authoritative_zone(self):
    """Get the authoritative name zone.

    Scrapes the provider's domain list page and walks up the subdomain
    chain of ``self.domain`` until a zone with its own entry is found;
    ``self.domain`` is rewritten to that zone.

    NOTE(review): failures are signaled via assert, which is stripped
    under ``python -O``.
    """
    # Local import keeps bs4 optional until this path is used.
    from bs4 import BeautifulSoup
    zones_response = self.session.get(self.URLS['domain_list'])
    self._log('Zone', zones_response)
    assert zones_response.status_code == 200, \
        'Could not retrieve domain list due to a network error.'
    html = BeautifulSoup(zones_response.content, 'html.parser')
    self._log('Zone', html)
    domain_table = html.find('table', {'id': 'cp_domain_table'})
    assert domain_table is not None, 'Could not find domain table'
    domain = self.domain or ''
    domain_text = None
    subdomains = domain.split('.')
    # Strip leading labels one at a time until the zone is listed, but
    # never below a registrable 2-label domain.
    while True:
        domain = '.'.join(subdomains)
        LOGGER.debug('Check if %s has own zone', domain)
        domain_text = domain_table.find(string=domain)
        if domain_text is not None or len(subdomains) < 3:
            break
        subdomains.pop(0)
    self.domain = domain
    assert domain_text is not None, \
        'The domain does not exist on Easyname.'
    return domain_text
def register(self, signal, description):
    """Register a new signal; only registered signals may be sent.

    :param signal: Unique name of the signal
    :param description: Description of the reason or use case, why this
        signal is needed. Used for documentation.
    """
    # Delegates to the app-level registry, tagging the owning plugin.
    return self.__app.signals.register(signal, self._plugin, description)
def save(self, name):
    """Save the entire Docker context tarball to a separate file.

    :param name: File path to save the tarball into.
    :type name: unicode | str
    """
    with open(name, 'wb+') as f:
        while True:
            # Bounded chunks keep memory flat; the previous bare read()
            # slurped the whole remaining stream on the first iteration.
            buf = self._fileobj.read(65536)
            if not buf:
                break
            f.write(buf)
def validate_index(self, rdf_class):
    """Compare the triplestore and the Elasticsearch index.

    Elasticsearch records that are missing from the triplestore are
    deleted from the index so the two stores stay in sync.

    :param rdf_class: currently unused; kept for interface compatibility.
    :return: the bulk-save result when deletions were issued, else None.
    """
    es_ids = set(self.get_es_ids())
    tstore_ids = {item[1] for item in self.get_uri_list(no_status=True)}
    # Ids present in the index but absent from the triplestore are stale.
    stale = es_ids - tstore_ids
    if stale:
        # BUG FIX: removed a leftover pdb.set_trace() that halted
        # execution here in production.
        action_list = self.es_worker.make_action_list(stale,
                                                      action_type="delete")
        return self.es_worker.bulk_save(action_list)
    return None
Will compare the triplestore and Elasticsearch index to ensure that the Elasticsearch and triplestore items match. Elasticsearch records that are not in the triplestore will be deleted.
def _handle_response(self, url, res, suppress_empty=True):
    """Delegate to the base Connection handler, then record the API's
    rate-limit headers and optionally throttle.

    When rate-limit headers are present, ``self.rate_limit`` is updated
    and, if ``self.rate_limiting_management`` is configured and the
    remaining request budget has dropped to its threshold, this method
    may sleep until the window resets and/or invoke a user callback.
    """
    result = Connection._handle_response(self, url, res, suppress_empty)
    if 'X-Rate-Limit-Time-Reset-Ms' in res.headers:
        # Snapshot the current rate-limit window from the response headers.
        self.rate_limit = dict(ms_until_reset=int(res.headers['X-Rate-Limit-Time-Reset-Ms']),
                               window_size_ms=int(res.headers['X-Rate-Limit-Time-Window-Ms']),
                               requests_remaining=int(res.headers['X-Rate-Limit-Requests-Left']),
                               requests_quota=int(res.headers['X-Rate-Limit-Requests-Quota']))
        if self.rate_limiting_management:
            if self.rate_limiting_management['min_requests_remaining'] >= self.rate_limit['requests_remaining']:
                if self.rate_limiting_management['wait']:
                    # Block until the window resets, rounded up to whole
                    # seconds.
                    sleep(ceil(float(self.rate_limit['ms_until_reset']) / 1000))
                if self.rate_limiting_management.get('callback_function'):
                    callback = self.rate_limiting_management['callback_function']
                    args_dict = self.rate_limiting_management.get('callback_args')
                    # Invoke with the configured args dict, or with no
                    # arguments when none was configured.
                    if args_dict:
                        callback(args_dict)
                    else:
                        callback()
    return result
Adds rate limiting information on to the response object
def _populate_attributes(self, config, record, context, data):
    """Populate internal attributes from a record found in LDAP.

    ``config['search_return_attributes']`` maps LDAP attribute names to
    internal attribute names; non-empty LDAP values are copied across,
    empty/null values are logged and skipped.
    """
    attr_map = config['search_return_attributes']
    for ldap_attr, internal_attr in attr_map.items():
        if ldap_attr not in record["attributes"]:
            continue
        values = record["attributes"][ldap_attr]
        if values:
            data.attributes[internal_attr] = values
            satosa_logging(
                logger,
                logging.DEBUG,
                "Setting internal attribute {} with values {}".format(
                    internal_attr,
                    values
                ),
                context.state
            )
        else:
            satosa_logging(
                logger,
                logging.DEBUG,
                "Not setting internal attribute {} because value {} is null or empty".format(
                    internal_attr,
                    values
                ),
                context.state
            )
Use a record found in LDAP to populate attributes.
async def set_loop(self, loop_value):
    """Update the loop mode; accepted values are 'on', 'off', or 'shuffle'.

    An invalid value is reported via the status log and leaves the
    current mode unchanged.
    """
    messages = {
        'on': "Looping on",
        'off': "Looping off",
        'shuffle': "Looping on and shuffling",
    }
    if loop_value not in messages:
        self.statuslog.error("Loop value must be `off`, `on`, or `shuffle`")
        return
    self.loop_type = loop_value
    self.statuslog.info(messages[loop_value])
Updates the loop value; can be 'off', 'on', or 'shuffle'.
def subclasses(cls, lst=None):
    """Recursively gather all (transitive) subclasses of *cls*.

    :param lst: accumulator list; a fresh list is created when omitted.
    :return: subclasses in discovery order, without duplicates.
    """
    found = [] if lst is None else lst
    for sub in cls.__subclasses__():
        if sub in found:
            continue
        found.append(sub)
        subclasses(sub, lst=found)
    return found
Recursively gather subclasses of cls.
def run(self, module, dependency=False, kwargs={}):
    """Runs a specific module.

    :param module: module class to load and execute.
    :param dependency: True when run as a dependency of another module;
        dependency runs are not recorded in ``self.executed_modules``.
    :param kwargs: keyword arguments forwarded to the module loader.
        NOTE(review): mutable default argument — appears safe only
        because it is never mutated here; confirm.
    :return: the loaded module object (whether or not it was enabled).
    """
    try:
        obj = self.load(module, kwargs)
        # Only enabled Module instances are actually executed.
        if isinstance(obj, binwalk.core.module.Module) and obj.enabled:
            obj.main()
            self.status.clear()
        # Top-level runs are recorded and their dependencies torn down;
        # dependency runs stay loaded for their parent module.
        if not dependency:
            self.executed_modules[module] = obj
            obj._unload_dependencies()
            obj.unload()
    except KeyboardInterrupt as e:
        # Let the status server shut down cleanly before propagating
        # the interrupt.
        if self.status.running:
            self.status.shutdown = True
            while not self.status.finished:
                time.sleep(0.1)
        raise e
    return obj
Runs a specific module.
def cdist_sq_periodic(ra, rb, L):
    """Squared pairwise distances between two point sets in periodic space.

    Parameters
    ----------
    ra, rb: float array-like, shape (n, d) and (m, d)
        Two sets of points in d dimensions.
    L: float array, shape (d,)
        System lengths.

    Returns
    -------
    cdist_sq: float array-like, shape (n, m)
        ``cdist_sq[i, j]`` is the squared periodic distance between
        ``ra[i]`` and ``rb[j]``.
    """
    sep = csep_periodic(ra, rb, L)
    return np.sum(sep ** 2, axis=-1)
Return the squared distance between each point in one set, and every point in a second set, in periodic space. Parameters ---------- ra, rb: float array-like, shape (n, d) and (m, d) in d dimensions. Two sets of points. L: float array, shape (d,) System lengths. Returns ------- cdist_sq: float array-like, shape (n, m, d) cdist_sq[i, j] is the squared distance between point j and point i.
def list_length(queue, backend='sqlite'):
    """Return the number of items in *queue* for the given backend.

    CLI Example:

    .. code-block:: bash

        salt-run queue.list_length myqueue
        salt-run queue.list_length myqueue backend=sqlite
    """
    funcs = salt.loader.queues(__opts__)
    cmd = '{0}.list_length'.format(backend)
    if cmd not in funcs:
        raise SaltInvocationError('Function "{0}" is not available'.format(cmd))
    return funcs[cmd](queue=queue)
Provide the number of items in a queue CLI Example: .. code-block:: bash salt-run queue.list_length myqueue salt-run queue.list_length myqueue backend=sqlite
def dataSource(self, value):
    """Set the data source; *value* must be a DataSource instance."""
    if not isinstance(value, DataSource):
        raise TypeError("value must be a DataSource object")
    self._dataSource = value
sets the datasource object
def astype(self, type_name):
    """Convert this AstroImage to another representation.

    Supported values for *type_name* are 'nddata' and 'hdu'; anything
    else raises ValueError.
    """
    converters = {'nddata': self.as_nddata, 'hdu': self.as_hdu}
    if type_name in converters:
        return converters[type_name]()
    raise ValueError("Unrecognized conversion type '%s'" % (type_name))
Convert AstroImage object to some other kind of object.
def has(self, id, domain):
    """Check whether message *id* has a translation in *domain*.

    Falls back to the fallback catalogue when this catalogue does not
    define the message itself.

    @rtype: bool
    @return: true if the message has a translation, false otherwise
    """
    # BUG FIX: the original tested isinstance(..., (str, unicode)) —
    # `unicode` does not exist on Python 3, so every call raised
    # NameError.  On Python 3, str covers all text.
    assert isinstance(id, str)
    assert isinstance(domain, str)
    if self.defines(id, domain):
        return True
    if self.fallback_catalogue is not None:
        return self.fallback_catalogue.has(id, domain)
    return False
Checks if a message has a translation. @rtype: bool @return: true if the message has a translation, false otherwise
def set_log_type_level(self, logType, level):
    """Set the logging level for a given log type.

    :Parameters:
        #. logType (string): A defined logging type.
        #. level (number): The level of logging.
    """
    assert _is_number(level), "level must be a number"
    level = float(level)
    # BUG FIX: the original contained `name = str(name)`, which
    # referenced an undefined variable and raised NameError on every
    # call; the line served no purpose and has been removed.
    self.__logTypeLevels[logType] = level
Set a logtype logging level. :Parameters: #. logType (string): A defined logging type. #. level (number): The level of logging.
def range_hourly(start=None, stop=None, timezone='UTC', count=None):
    """Generate a set of Delorean objects with HOURLY stops.

    Thin convenience wrapper around :func:`stops` with ``freq=HOURLY``.
    """
    return stops(freq=HOURLY, start=start, stop=stop,
                 timezone=timezone, count=count)
This an alternative way to generating sets of Delorean objects with HOURLY stops
def get_objects(self):
    """Return a dict mapping each form key to its instance object.

    Keys match ``self.form_classes``; this base implementation maps
    every key to None (subclasses supply real instances).
    """
    # A dict comprehension replaces the six.iterkeys() loop: iterating a
    # dict directly yields its keys on both Python 2 and 3, so the six
    # dependency is unnecessary here.
    return {key: None for key in self.form_classes}
Returns dictionary with the instance objects for each form. Keys should match the corresponding form.
def update_vcs(self, fname, index):
    """Refresh the VCS (git) status widget for the file at *fname*.

    Shows the current branch plus a modified-file count; the widget is
    hidden when no branch is found.  *index* is unused but kept for the
    caller's signature.
    """
    repo_dir = os.path.dirname(fname)
    branches, branch, files_modified = get_git_refs(repo_dir)
    if branch:
        text = branch
    else:
        text = ''
    n_modified = len(files_modified)
    if n_modified:
        text += ' [{}]'.format(n_modified)
    self.setVisible(bool(branch))
    self.set_value(text)
Update vcs status.
def EncodeMessageList(cls, message_list, packed_message_list):
    """Serialize *message_list* into *packed_message_list*.

    The payload is zlib-compressed only when compression actually makes
    it smaller; otherwise the uncompressed bytes are kept.
    """
    raw = message_list.SerializeToString()
    packed_message_list.message_list = raw
    compressed = zlib.compress(raw)
    if len(compressed) >= len(raw):
        return
    packed_message_list.compression = (
        rdf_flows.PackedMessageList.CompressionType.ZCOMPRESSION)
    packed_message_list.message_list = compressed
Encode the MessageList into the packed_message_list rdfvalue.
def get_protein_group_content(pgmap, master):
    """Build protein-group rows for each protein under *master*.

    For every protein in *pgmap* (a ``{protein: {peptide: [psm, ...]}}``
    mapping) produce a row containing the master, the protein, its
    peptide and PSM counts, the summed PSM scores, and two fields taken
    from the first PSM of the first peptide.  Rows are ready to enter
    the DB table.
    """
    rows = []
    for protein, peptides in pgmap.items():
        all_psms = [psm for pgpsms in peptides.values() for psm in pgpsms]
        # Representative PSM: first PSM of the first peptide (dict order).
        first_psm = next(iter(next(iter(peptides.values()))))
        rows.append([
            0,
            master,
            protein,
            len(peptides),
            len(all_psms),
            sum(psm[1] for psm in all_psms),
            first_psm[3],
            first_psm[2],
        ])
    return rows
For each master protein, we generate the protein group proteins complete with sequences, psm_ids and scores. Master proteins are included in this group. Returns a list of [protein, master, pep_hits, psm_hits, protein_score], which is ready to enter the DB table.
def ls(ctx, available):
    "List installed datasets on path"
    # ctx.obj already carries 'path' and 'global_' and is forwarded
    # wholesale via **ctx.obj, so the dead local copies the original
    # made (`path = ...`, `global_ = ...`) have been removed.
    _ls(available=available, **ctx.obj)
List installed datasets on path
def pop_with_body_instrs(setup_with_instr, queue):
    """Pop instructions from *queue* that form the body of a with-block.

    ``setup_with_instr.arg`` is the first instruction *after* the block,
    so everything up to (but excluding) it is the body.  The trailing
    ``LOAD_CONST(None)``/``POP_BLOCK`` pair and the ``WITH_CLEANUP`` /
    ``END_FINALLY`` epilogue are validated and stripped; only the real
    body instructions are returned.
    """
    body_instrs = popwhile(op.is_not(setup_with_instr.arg), queue, side='left')
    # A with-block body ends with LOAD_CONST(None) followed by POP_BLOCK.
    load_none = body_instrs.pop()
    expect(load_none, instrs.LOAD_CONST, "at end of with-block")
    pop_block = body_instrs.pop()
    expect(pop_block, instrs.POP_BLOCK, "at end of with-block")
    if load_none.arg is not None:
        raise DecompilationError(
            "Expected LOAD_CONST(None), but got "
            "%r instead" % (load_none)
        )
    # The cleanup instructions sit after the jump target and are still
    # in `queue`, not in the popped body.
    with_cleanup = queue.popleft()
    expect(with_cleanup, instrs.WITH_CLEANUP, "at end of with-block")
    end_finally = queue.popleft()
    expect(end_finally, instrs.END_FINALLY, "at end of with-block")
    return body_instrs
Pop instructions from `queue` that form the body of a with block.
def _plain_or_callable(obj): if callable(obj): return obj() elif isinstance(obj, types.GeneratorType): return next(obj) else: return obj
Returns the value of the called object of obj is a callable, otherwise the plain object. Returns None if obj is None. >>> obj = None >>> _plain_or_callable(obj) >>> stmt = 'select * from sys.nodes' >>> _plain_or_callable(stmt) 'select * from sys.nodes' >>> def _args(): ... return [1, 'name'] >>> _plain_or_callable(_args) [1, 'name'] >>> _plain_or_callable((x for x in range(10))) 0 >>> class BulkArgsGenerator: ... def __call__(self): ... return [[1, 'foo'], [2, 'bar'], [3, 'foobar']] >>> _plain_or_callable(BulkArgsGenerator()) [[1, 'foo'], [2, 'bar'], [3, 'foobar']]
def hscan(self, name, cursor='0', match=None, count=10):
    """Emulate redis HSCAN over the emulated hash *name*."""
    def sorted_items():
        # Stable key ordering makes scanning deterministic across calls.
        items = list(self.hgetall(name).items())
        items.sort(key=lambda pair: pair[0])
        return items

    result = self._common_scan(sorted_items, cursor=cursor, match=match,
                               count=count, key=lambda pair: pair[0])
    # Callers expect the scanned page as a dict, not a list of pairs.
    result[1] = dict(result[1])
    return result
Emulate hscan.
def _validate_entity(entity):
    """Validate the dict representation of an entity against its schema.

    Supported ``entity['type']`` values are 'cluster' and 'vcenter';
    anything else raises ArgumentValueError.  Schema violations raise
    InvalidEntityError.  See ``_get_entity`` docstrings for the format.
    """
    schema_classes = {'cluster': ESXClusterEntitySchema,
                      'vcenter': VCenterEntitySchema}
    entity_type = entity['type']
    if entity_type not in schema_classes:
        raise ArgumentValueError('Unsupported entity type \'{0}\''
                                 ''.format(entity_type))
    schema = schema_classes[entity_type].serialize()
    try:
        jsonschema.validate(entity, schema)
    except jsonschema.exceptions.ValidationError as exc:
        raise InvalidEntityError(exc)
Validates the entity dict representation entity Dictionary representation of an entity. See ``_get_entity`` docstrings for format.
def get_ngrams(path):
    """Return the n-grams read from the UTF-8 file at *path*.

    Each line becomes one n-gram with surrounding whitespace stripped.
    """
    with open(path, encoding='utf-8') as fh:
        return [line.strip() for line in fh]
Returns a list of n-grams read from the file at `path`.
def get_extents(self, view, ranges, range_type='combined'):
    """Supply custom, static extents for radial heatmaps.

    Radial heatmaps always occupy the same square bounds, derived from
    the outer radius and maximum radius; range types other than 'data'
    and 'combined' get no extents.
    """
    if range_type in ('data', 'combined'):
        lo = -self.radius_outer
        hi = 2 * self.max_radius + self.radius_outer
        return (lo, lo, hi, hi)
    return (None,) * 4
Supply custom, static extents because radial heatmaps always have the same boundaries.
def buckets_delete(self, bucket):
    """Issues a request to delete a bucket.

    Args:
      bucket: the name of the bucket.
    Raises:
      Exception if there is an error performing the operation.
    """
    url = Api._ENDPOINT + (Api._BUCKET_PATH % bucket)
    # raw_response=True: a successful DELETE returns no JSON body to decode.
    google.datalab.utils.Http.request(url, method='DELETE',
                                      credentials=self._credentials,
                                      raw_response=True)
Issues a request to delete a bucket. Args: bucket: the name of the bucket. Raises: Exception if there is an error performing the operation.
def from_nibabel(nib_image):
    """Convert a nibabel image to an ANTsImage.

    Round-trips through a temporary NIfTI file on disk; the file is
    removed even when the conversion fails.
    """
    import tempfile
    # mkstemp creates the file atomically; the deprecated mktemp() only
    # returned a name, leaving a window for a symlink/race attack.
    fd, tmpfile = tempfile.mkstemp(suffix='.nii.gz')
    os.close(fd)
    try:
        nib_image.to_filename(tmpfile)
        new_img = iio2.image_read(tmpfile)
    finally:
        os.remove(tmpfile)
    return new_img
Convert a nibabel image to an ANTsImage
def check_config(data):
    """Check that the site metadata contains the required keys.

    Returns True when valid; otherwise logs the problem and returns
    False.  TODO(crow): check more keys.
    """
    if "title" in data:
        return True
    logging.error("No 'title' in _config.yml")
    return False
Check if metadata is right TODO(crow): check more
def _set_listener(instance, obs):
    """Register observer *obs* on a HasProperties *instance*.

    When ``obs.names`` is the ``everything`` sentinel the observer is
    attached to every declared property; otherwise only to the listed
    names.  Listener buckets are created lazily per property name.
    """
    names = list(instance._props) if obs.names is everything else obs.names
    listeners = instance._listeners
    for name in names:
        if name not in listeners:
            listeners[name] = {typ: [] for typ in LISTENER_TYPES}
        listeners[name][obs.mode] += [obs]
Add listeners to a HasProperties instance
def set_default_subject(self, subject):
    """Set the collection's default subject (displayed by default on PFE).

    - **subject** may be a single :py:class:`.Subject` instance or a
      single subject ID (int or str); anything else raises TypeError.

    Examples::

        collection.set_default_subject(1234)
        collection.set_default_subject(Subject(1234))
    """
    if isinstance(subject, Subject):
        subject_id = subject.id
    elif isinstance(subject, (int, str)):
        subject_id = str(subject)
    else:
        raise TypeError
    self.http_post(
        '{}/links/default_subject'.format(self.id),
        json={'default_subject': subject_id},
    )
Sets the subject's location media URL as a link. It displays as the default subject on PFE. - **subject** can be a single :py:class:`.Subject` instance or a single subject ID. Examples:: collection.set_default_subject(1234) collection.set_default_subject(Subject(1234))
def add_environment_information(meta):
    """Record the current runtime environment in the *meta* dict."""
    meta.update(
        timestamp=datetime.utcnow().isoformat(" "),
        platform=platform.system(),
        release=platform.release(),
        python=platform.python_version(),
        packages=get_pkg_info("memote"),
    )
Record environment information.
def _create_factor_rule(tok):
    """Create the factor-node object matching the token's type name.

    *tok* is a ``(type_name, value)`` pair; unknown type names fall
    back to ConstantRule.
    """
    rule_classes = {
        'IPV4': IPV4Rule,
        'IPV6': IPV6Rule,
        'DATETIME': DatetimeRule,
        'TIMEDELTA': TimedeltaRule,
        'INTEGER': IntegerRule,
        'FLOAT': FloatRule,
        'VARIABLE': VariableRule,
    }
    return rule_classes.get(tok[0], ConstantRule)(tok[1])
Simple helper method for creating factor node objects based on node name.
def resize_image(self, data, size):
    """Resize the given raw image *data* to fit inside a box of *size*.

    :param data: raw image bytes.
    :param size: (width, height) bounding box.
    :return: the resized image encoded as PNG bytes.
    """
    from machina.core.compat import PILImage as Image
    image = Image.open(BytesIO(data))
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same
    # filter and has been its canonical name since Pillow 2.7.
    image.thumbnail(size, Image.LANCZOS)
    out = BytesIO()
    image.save(out, format='PNG')
    return out.getvalue()
Resizes the given image to fit inside a box of the given size.
def add_node_collection(self, node, collection):
    """Add the collected test items from a worker *node*.

    The collection is stored in the ``.registered_collections``
    dictionary.  Once the session-wide collection is final, a node
    reporting a different collection is logged (with a diff) and its
    collection is not registered.

    Called by the hook:

    - ``DSession.worker_collectionfinish``.
    """
    # Only nodes that have been assigned work may report a collection.
    assert node in self.assigned_work
    if self.collection_is_completed:
        # A late-arriving node must agree with the established collection.
        assert self.collection
        if collection != self.collection:
            other_node = next(iter(self.registered_collections.keys()))
            msg = report_collection_diff(
                self.collection, collection, other_node.gateway.id, node.gateway.id
            )
            self.log(msg)
            return
    self.registered_collections[node] = list(collection)
Add the collected test items from a node. The collection is stored in the ``.registered_collections`` dictionary. Called by the hook: - ``DSession.worker_collectionfinish``.
def spi_configure(self, polarity, phase, bitorder):
    """Configure the SPI interface (clock polarity, phase, bit order).

    Raises via ``_raise_error_if_negative`` when the underlying API
    call reports an error.
    """
    status = api.py_aa_spi_configure(self.handle, polarity, phase, bitorder)
    _raise_error_if_negative(status)
Configure the SPI interface.
def dispatch(table, args):
    """Dispatch to a function based on the contents of *args*.

    *table* maps a subcommand name to a tuple ``(func, type1, ...)``
    where the types convert the remaining positional arguments.  Prints
    usage and exits(0) when no subcommand is given; prints usage to
    stderr and exits(1) on an unknown subcommand, wrong argument count,
    or a failed conversion.
    """
    if len(args) == 1:
        # No subcommand: show help and exit successfully.
        print_help(args[0], table)
        sys.exit(0)
    if args[1] not in table or len(args) != len(table[args[1]]) + 1:
        print_help(args[0], table, dest=sys.stderr)
        sys.exit(1)
    sig = table[args[1]]
    try:
        # Convert each CLI string with its declared type constructor.
        # NOTE(review): conversions like int('x') raise ValueError,
        # which is NOT caught here — confirm TypeError is intended.
        fixed_args = [type_(arg) for arg, type_ in zip(args[2:], sig[1:])]
    except TypeError:
        print_help(args[0], table, dest=sys.stderr)
        sys.exit(1)
    sig[0](*fixed_args)
Dispatches to a function based on the contents of `args`.
def set_sampled_topics(self, sampled_topics):
    """Allocate externally sampled topics to the documents.

    Accepts a 1-D (single sample) or 2-D (samples x tokens) integer
    array, stores it as 2-D, and recomputes the term-topic and
    document-topic matrices.
    """
    # BUG FIX: the original checked `dtype == np.int`; the np.int alias
    # was removed in NumPy 1.24, raising AttributeError.  Accept any
    # integer dtype instead.
    assert np.issubdtype(sampled_topics.dtype, np.integer) and \
        len(sampled_topics.shape) <= 2

    if len(sampled_topics.shape) == 1:
        # Promote a single sample to shape (1, n_tokens).
        self.sampled_topics = \
            sampled_topics.reshape(1, sampled_topics.shape[0])
    else:
        self.sampled_topics = sampled_topics

    self.samples = self.sampled_topics.shape[0]
    self.tt = self.tt_comp(self.sampled_topics)
    self.dt = self.dt_comp(self.sampled_topics)
Allocate sampled topics to the documents rather than estimate them. Automatically generate term-topic and document-topic matrices.
def write(self, s):
    """Write the string ``s`` to the pseudoterminal.

    Returns the number of bytes written.  Raises EOFError when the pty
    has been closed and IOError when the underlying write fails.
    """
    if not self.isalive():
        raise EOFError('Pty is closed')
    if PY2:
        # On Python 2, coerce byte strings to unicode before writing.
        s = _unicode(s)
    success, nbytes = self.pty.write(s)
    if not success:
        raise IOError('Write failed')
    return nbytes
Write the string ``s`` to the pseudoterminal. Returns the number of bytes written.
def auto_select_categorical_features(X, threshold=10):
    """Make a feature mask of categorical features in X.

    A column is considered categorical when it has at most *threshold*
    unique values.  Works on dense arrays and CSC-style sparse matrices
    (for sparse input, only explicitly stored values are inspected).

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_samples, n_features)
        Dense array or sparse matrix.
    threshold : int
        Maximum number of unique values per feature to consider the
        feature to be categorical.

    Returns
    -------
    feature_mask : list of booleans of size {n_features, }
    """
    mask = []
    for col in range(X.shape[1]):
        if sparse.issparse(X):
            start, end = X.indptr[col], X.indptr[col + 1]
            values = X.data[start:end]
        else:
            values = X[:, col]
        mask.append(np.unique(values).size <= threshold)
    return mask
Make a feature mask of categorical features in X. Features with less than 10 unique values are considered categorical. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. threshold : int Maximum number of unique values per feature to consider the feature to be categorical. Returns ------- feature_mask : array of booleans of size {n_features, }
def get_num_sequenced(study_id):
    """Return the number of sequenced tumors for the given cBio study.

    Useful for computing mutation prevalence within a cancer type.

    Parameters
    ----------
    study_id : str
        The ID of the cBio study, e.g. 'paad_icgc'.

    Returns
    -------
    int
        The number of sequenced tumors in the study (0 when the study
        returns no case lists).
    """
    df = send_request(cmd='getCaseLists', cancer_study_id=study_id)
    if df.empty:
        return 0
    # Keep only the case list whose id mentions 'sequenced'.
    sequenced = df[df['case_list_id'].str.contains('sequenced', case=False)]
    case_ids = sequenced['case_ids'].tolist()[0]
    return len(case_ids.split(' '))
Return number of sequenced tumors for given study. This is useful for calculating mutation statistics in terms of the prevalence of certain mutations within a type of cancer. Parameters ---------- study_id : str The ID of the cBio study. Example: 'paad_icgc' Returns ------- num_case : int The number of sequenced tumors in the given study