code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def interpolate_where(self, condition):
    """Blank out entries selected by ``condition``, then interpolate across the gaps.

    Parameters
    ----------
    condition : boolean array-like
        Mask selecting the entries to remove before interpolating.

    Returns
    -------
    Same type as ``self``, with the masked entries replaced by interpolated
    values.
    """
    # The original body was unreachable dead code behind a bare
    # ``raise NotImplementedError()`` and ignored ``condition``
    # (it hard-coded ``self < 0``); use the mask the caller provides.
    self[condition] = np.nan
    return self.interpolate()
Set the selected values to NaN, then interpolate across the resulting gaps.
def get_collections_for_image(self, image_id):
    """Get identifiers of all active collections that contain a given image.

    Parameters
    ----------
    image_id : string
        Unique identifier of the image object.

    Returns
    -------
    list(string)
        Identifiers of the matching image collections.
    """
    query = {'active': True, 'images.identifier': image_id}
    return [str(document['_id']) for document in self.collection.find(query)]
Get identifiers of all collections that contain a given image. Parameters ---------- image_id : string Unique identifier of the image object Returns ------- List(string) List of image collection identifiers
def contigs_to_positions(contigs, binning=10000):
    """Build positions from contig labels.

    From a list of contig labels and a binning parameter, build a list of
    positions that is a concatenation of ranges with step equal to the
    binning, restarting at 0 for each contig.

    Parameters
    ----------
    contigs : list or array_like
        The list of contig labels, must be sorted (equal labels adjacent).
    binning : int, optional
        The step for the list of positions. Default is 10000.

    Returns
    -------
    positions : numpy.ndarray
        The piece-wise sorted list of positions.
    """
    positions = np.zeros_like(contigs)
    index = 0
    # Fixes in the original: ``itertools.groubpy`` was a typo for ``groupby``;
    # ``len(chunk)`` fails on the group iterator; ``np.arange(list(chunk))``
    # is invalid -- the intent was an arange over the group length.
    for _, chunk in itertools.groupby(contigs):
        length = len(list(chunk))
        positions[index:index + length] = np.arange(length) * binning
        index += length
    return positions
Build positions from contig labels From a list of contig labels and a binning parameter, build a list of positions that's essentially a concatenation of linspaces with step equal to the binning. Parameters ---------- contigs : list or array_like The list of contig labels, must be sorted. binning : int, optional The step for the list of positions. Default is 10000. Returns ------- positions : numpy.ndarray The piece-wise sorted list of positions
def update_token_tempfile(token):
    """Example token-update callback: dump ``token`` as indented JSON to a file.

    NOTE(review): ``tmp`` is not defined in this function -- presumably a
    module-level path constant; confirm it exists wherever this is used.
    """
    with open(tmp, 'w') as f:
        f.write(json.dumps(token, indent=4))
Example of function for token update
def get_value(repo_directory, key, expect_type=None):
    """Get the value of the specified key in the config file.

    :param repo_directory: repository whose config file is read
    :param key: configuration key to look up
    :param expect_type: optional type the value must be an instance of
    :raises ConfigSchemaError: when the value exists but has the wrong type
    :returns: the configured value, or None when the key is absent
    """
    value = read_config(repo_directory).get(key)
    wrong_type = (
        expect_type
        and value is not None
        and not isinstance(value, expect_type)
    )
    if wrong_type:
        raise ConfigSchemaError(
            'Expected config variable %s to be type %s, got %s'
            % (repr(key), repr(expect_type), repr(type(value))))
    return value
Gets the value of the specified key in the config file.
def max_substring(words, position=0, _last_letter=''):
    """Find the max substring shared by all strings starting at ``position``.

    Args:
        words (list): list of unicode of all words to compare
        position (int): starting position in each word to begin analyzing
        _last_letter (unicode): last common letter, internal use only

    Returns:
        unicode: max str common to all words

    Examples:
        >>> max_substring(['aaaa', 'aaab', 'aaac'])
        'aaa'
        >>> max_substring(['abbb', 'bbbb', 'cbbb'], position=1)
        'bbb'
        >>> max_substring(['abc', 'bcd', 'cde'])
        ''
    """
    try:
        letters = [word[position] for word in words]
    except IndexError:
        # Ran past the end of at least one word: the common prefix stops here.
        return _last_letter
    first = letters[0]
    if any(letter != first for letter in letters):
        return _last_letter
    return _last_letter + max_substring(
        words, position=position + 1, _last_letter=first)
Finds max substring shared by all strings starting at position Args: words (list): list of unicode of all words to compare position (int): starting position in each word to begin analyzing for substring _last_letter (unicode): last common letter, only for use internally unless you really know what you are doing Returns: unicode: max str common to all words Examples: .. code-block:: Python >>> max_substring(['aaaa', 'aaab', 'aaac']) 'aaa' >>> max_substring(['abbb', 'bbbb', 'cbbb'], position=1) 'bbb' >>> max_substring(['abc', 'bcd', 'cde']) ''
def find_single(decl_matcher, decls, recursive=True):
    """Return the single declaration matching ``decl_matcher``, or None.

    :param decl_matcher: callable taking one declaration and returning bool
    :param decls: search scope -- a declaration or list of declarations
    :param recursive: if True, also match against nested declarations
    :returns: the unique match, or None when zero or several match
    """
    matches = matcher.find(decl_matcher, decls, recursive)
    return matches[0] if len(matches) == 1 else None
Returns a reference to the declaration that matches the `decl_matcher` criteria. If a unique declaration could not be found, the method returns None. :param decl_matcher: Python callable object, that takes one argument - reference to a declaration :param decls: the search scope, :class:declaration_t object or list of :class:declaration_t objects :param recursive: boolean, if True, the method will run `decl_matcher` on the internal declarations too
def addIndividual(self, individual):
    """Add the specified individual to this dataset, indexed by id and name."""
    individual_id = individual.getId()
    self._individualIdMap[individual_id] = individual
    self._individualIds.append(individual_id)
    self._individualNameMap[individual.getName()] = individual
Adds the specified individual to this dataset.
def cmdscale_fast(D, ndim):
    """Fast classical MDS (CMDS) using randomized SVD.

    Parameters
    ----------
    D : array-like, shape [n_samples, n_samples]
        Pairwise distance matrix to embed.
    ndim : int
        Number of dimensions in which to embed ``D``.

    Returns
    -------
    Y : array-like, shape [n_samples, ndim]
        Embedded coordinates.
    """
    tasklogger.log_debug("Performing classic MDS on {} of shape {}...".format(
        type(D).__name__, D.shape))
    # Square then double-center the distance matrix (standard CMDS step).
    # NOTE(review): textbook CMDS centers -0.5 * D**2; the -0.5 factor is
    # omitted here -- confirm this is intentional (it rescales the output).
    D = D**2
    D = D - D.mean(axis=0)[None, :]
    D = D - D.mean(axis=1)[:, None]
    pca = PCA(n_components=ndim, svd_solver='randomized')
    Y = pca.fit_transform(D)
    return Y
Fast CMDS using randomized SVD Parameters ---------- D : array-like, input data [n_samples, n_dimensions] ndim : int, number of dimensions in which to embed `D` Returns ------- Y : array-like, embedded data [n_samples, ndim]
def get_torrent(self, torrent_id):
    """Get the ``.torrent`` data for the given ``torrent_id``.

    :param torrent_id: the ID of the torrent to download
    :raises TorrentNotFoundError: if the torrent does not exist
    :returns: :class:`Torrent` of the associated torrent
    """
    params = {
        'page': 'download',
        'tid': torrent_id,
    }
    r = requests.get(self.base_url, params=params)
    # The tracker answers unknown ids with an HTML page, so a non-torrent
    # content type signals "not found".
    if r.headers.get('content-type') != 'application/x-bittorrent':
        raise TorrentNotFoundError(TORRENT_NOT_FOUND_TEXT)
    torrent_data = r.content
    return Torrent(torrent_id, torrent_data)
Gets the `.torrent` data for the given `torrent_id`. :param torrent_id: the ID of the torrent to download :raises TorrentNotFoundError: if the torrent does not exist :returns: :class:`Torrent` of the associated torrent
def invalidate(self, key, **kw):
    """Invalidate the cached value stored under ``key``.

    :param key: the value's key.
    :param kw: cache configuration arguments; the backend is configured from
        these on first request and reused for identical configurations.
    """
    self.impl.invalidate(key, **self._get_cache_kw(kw, None))
Invalidate a value in the cache. :param key: the value's key. :param \**kw: cache configuration arguments. The backend is configured using these arguments upon first request. Subsequent requests that use the same series of configuration values will use that same backend.
def type(self):
    "The type of elements stored in the mapping."
    # Lazily infer the element type from the first stored value.
    # NOTE(review): ``self.values()[0]`` assumes ``values()`` returns an
    # indexable sequence (Python 2 dict semantics); under Python 3 dict
    # views are not subscriptable -- confirm target runtime.
    if self._type is None and len(self):
        self._type = self.values()[0].__class__
    return self._type
The type of elements stored in the mapping.
def frame_to_latents(frame, hparams):
    """Encode a video frame to Glow latents.

    Runs the glow encoder (``reverse=False``) on the preprocessed frame and
    returns the top-level latent plus the per-level eps tensors.
    """
    frame = preprocess_frame(frame)
    glow_vals = glow_ops.encoder_decoder(
        "codec", frame, hparams, eps=None, reverse=False)
    # encoder_decoder returns a 5-tuple; only z_top and level_eps are needed.
    z_top, _, level_eps, _, _ = glow_vals
    return z_top, level_eps
Encode frames to latents.
def _chooseBestSegmentPerColumn(cls, connections, matchingCells,
                                allMatchingSegments, potentialOverlaps,
                                cellsPerColumn):
    """For all columns covered by ``matchingCells``, choose each column's
    matching segment with the largest number of active potential synapses.
    Ties go to the first segment.

    @param connections (SparseMatrixConnections)
    @param matchingCells (numpy array)
    @param allMatchingSegments (numpy array)
    @param potentialOverlaps (numpy array)
    @param cellsPerColumn (int)
    @return (numpy array) the chosen (learning) segments, one per column
    """
    # Restrict candidate segments to those on the matching cells.
    candidateSegments = connections.filterSegmentsByCell(allMatchingSegments,
                                                         matchingCells)
    cellScores = potentialOverlaps[candidateSegments]
    # Map each candidate segment to its column index.
    # NOTE(review): '/' assumes integer division of cell indices (Python 2 or
    # integer numpy semantics); under true division this yields floats --
    # confirm '//' is not required on Python 3.
    columnsForCandidates = (connections.mapSegmentsToCells(candidateSegments) /
                            cellsPerColumn)
    # Pick the argmax score within each column group.
    onePerColumnFilter = np2.argmaxMulti(cellScores, columnsForCandidates)
    learningSegments = candidateSegments[onePerColumnFilter]
    return learningSegments
For all the columns covered by 'matchingCells', choose the column's matching segment with largest number of active potential synapses. When there's a tie, the first segment wins. @param connections (SparseMatrixConnections) @param matchingCells (numpy array) @param allMatchingSegments (numpy array) @param potentialOverlaps (numpy array)
def zremrangebylex(self, key, min=b'-', max=b'+', include_min=True,
                   include_max=True):
    """Remove all members in a sorted set between the given lexicographical
    range.

    :raises TypeError: if min is not bytes
    :raises TypeError: if max is not bytes
    """
    for name, value in (('min', min), ('max', max)):
        if not isinstance(value, bytes):
            raise TypeError("{} argument must be bytes".format(name))
    # '-' and '+' are the open-ended sentinels; anything else gets the
    # inclusive '[' or exclusive '(' prefix required by Redis.
    if min != b'-':
        min = (b'[' if include_min else b'(') + min
    if max != b'+':
        max = (b'[' if include_max else b'(') + max
    return self.execute(b'ZREMRANGEBYLEX', key, min, max)
Remove all members in a sorted set between the given lexicographical range. :raises TypeError: if min is not bytes :raises TypeError: if max is not bytes
def list_assets_ddo(self):
    """List all the DDOs registered in the aquarius instance.

    :return: list of DDO dictionaries parsed from the JSON response
    """
    return json.loads(self.requests_session.get(self.url).content)
List all the ddos registered in the aquarius instance. :return: List of DDO instance
def _check_whitespace(string): if string.count(' ') + string.count('\t') + string.count('\n') > 0: raise ValueError(INSTRUCTION_HAS_WHITESPACE)
Make sure there is no whitespace in the given string. Will raise a ValueError if whitespace is detected.
def beacon(config):
    """Check whether installed packages are at their latest versions and
    return an event entry for each package with an available upgrade.

    Example config::

        beacons:
          pkg:
            - pkgs:
                - zsh
                - apache2
            - refresh: True
    """
    ret = []
    _refresh = False
    pkgs = []
    # Collect all package names from the list-of-dicts config.
    for config_item in config:
        if 'pkgs' in config_item:
            pkgs += config_item['pkgs']
    # NOTE(review): this reads ``config['refresh']`` although ``config`` is
    # iterated as a list above -- 'in' on a list checks items, so this only
    # works if config also behaves like a mapping; confirm.
    if 'refresh' in config and config['refresh']:
        _refresh = True
    for pkg in pkgs:
        _installed = __salt__['pkg.version'](pkg)
        _latest = __salt__['pkg.latest_version'](pkg, refresh=_refresh)
        # pkg.latest_version returns falsy when already up to date.
        if _installed and _latest:
            _pkg = {'pkg': pkg,
                    'version': _latest
                    }
            ret.append(_pkg)
    return ret
Check if installed packages are the latest versions and fire an event for those that have upgrades. .. code-block:: yaml beacons: pkg: - pkgs: - zsh - apache2 - refresh: True
def _handle_tag_module_refresh(self, tag, data): self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) )
Handle a module_refresh event
def connect(self, id):
    """Open a proxy connection to a device's management interface.

    :param id: Device ID as an int.
    :return: :class:`devices.Connection <devices.Connection>` object
    :rtype: devices.Connection
    """
    schema = ConnectionSchema()
    resp = self.service.post(self.base+str(id)+'/connect/')
    return self.service.decode(schema, resp)
Open proxy connection to a device's management interface. :param id: Device ID as an int. :return: :class:`devices.Connection <devices.Connection>` object :rtype: devices.Connection
def result(self):
    """Stop worker threads and return the queue of task results.

    Enqueues one ``None`` sentinel per worker (each worker exits on it),
    waits for all queued tasks to complete, then re-raises the first
    exception captured by any worker, if one occurred.
    """
    for _ in range(self.num_threads):
        self.tasks_queue.put(None)
    self.tasks_queue.join()
    if not self.exceptions_queue.empty():
        raise self.exceptions_queue.get()
    return self.results_queue
Stop threads and return the result of all called tasks
def raftery_lewis(x, q, r, s=.95, epsilon=.001, verbose=1):
    """Return the number of iterations needed to achieve a given precision
    for the q-quantile (Raftery-Lewis convergence diagnostic).

    :param x: sampled series (one chain per column when 2-D)
    :param q: quantile
    :param r: accuracy requested for the quantile
    :param s: probability of attaining the requested accuracy (default 0.95)
    :param epsilon: half width of the tolerance interval (default 0.001)
    :param verbose: verbosity level (default 1)
    :return: (nmin, kthin, nburn, nprec, kmind) -- see the gibbsit.f fortran
        source for full definitions.

    Reference: Raftery & Lewis (1995), Practical Markov Chain Monte Carlo.
    """
    # Multi-chain input: apply the diagnostic to each column independently.
    if np.ndim(x) > 1:
        return [raftery_lewis(y, q, r, s, epsilon, verbose)
                for y in np.transpose(x)]
    # The computation itself is delegated to the compiled gibbsit routine.
    output = nmin, kthin, nburn, nprec, kmind = flib.gibbmain(
        x, q, r, s, epsilon)
    if verbose:
        print_("\n========================")
        print_("Raftery-Lewis Diagnostic")
        print_("========================")
        print_()
        print_(
            "%s iterations required (assuming independence) to achieve %s accuracy with %i percent probability." %
            (nmin, r, 100 * s))
        print_()
        print_(
            "Thinning factor of %i required to produce a first-order Markov chain." %
            kthin)
        print_()
        print_(
            "%i iterations to be discarded at the beginning of the simulation (burn-in)." %
            nburn)
        print_()
        print_("%s subsequent iterations required." % nprec)
        print_()
        print_(
            "Thinning factor of %i required to produce an independence chain." %
            kmind)
    return output
Return the number of iterations needed to achieve a given precision. :Parameters: x : sequence Sampled series. q : float Quantile. r : float Accuracy requested for quantile. s (optional) : float Probability of attaining the requested accuracy (defaults to 0.95). epsilon (optional) : float Half width of the tolerance interval required for the q-quantile (defaults to 0.001). verbose (optional) : int Verbosity level for output (defaults to 1). :Return: nmin : int Minimum number of independent iterates required to achieve the specified accuracy for the q-quantile. kthin : int Skip parameter sufficient to produce a first-order Markov chain. nburn : int Number of iterations to be discarded at the beginning of the simulation, i.e. the number of burn-in iterations. nprec : int Number of iterations not including the burn-in iterations which need to be obtained in order to attain the precision specified by the values of the q, r and s input parameters. kmind : int Minimum skip parameter sufficient to produce an independence chain. :Example: >>> raftery_lewis(x, q=.025, r=.005) :Reference: Raftery, A.E. and Lewis, S.M. (1995). The number of iterations, convergence diagnostics and generic Metropolis algorithms. In Practical Markov Chain Monte Carlo (W.R. Gilks, D.J. Spiegelhalter and S. Richardson, eds.). London, U.K.: Chapman and Hall. See the fortran source file `gibbsit.f` for more details and references.
def toFloatArray(img):
    """Convert an (unsigned) integer array to a float array of matching width.

    1- and 2-byte items map to float32; 4- and 8-byte items map to float64.
    """
    itemsize_to_float = {1: np.float32, 2: np.float32,
                         4: np.float64, 8: np.float64}
    return img.astype(itemsize_to_float[img.itemsize])
transform an unsigned integer array into a float array of the right size
def packageInfo(self):
    """Get the item's package information file (``item.pkinfo``).

    Downloads the file to the system temp directory and returns the result
    of the GET helper.
    """
    url = "%s/item.pkinfo" % self.root
    params = {'f' : 'json'}
    result = self._get(url=url,
                       param_dict=params,
                       securityHandler=self._securityHandler,
                       proxy_url=self._proxy_url,
                       proxy_port=self._proxy_port,
                       out_folder=tempfile.gettempdir())
    return result
gets the item's package information file
def _image2array(filepath):
    """Convert an image file into three numpy arrays (R, G, B).

    Each channel is scaled to [0, 1] and reshaped to (height, width), ready
    to be fed into geo_image.GeoImage to build a PyTROLL GeoImage object.
    """
    im = Pimage.open(filepath).convert('RGB')
    (width, height) = im.size
    # getdata(band) yields a flat sequence per channel; normalize to [0, 1].
    _r = np.array(list(im.getdata(0)))/255.0
    _g = np.array(list(im.getdata(1)))/255.0
    _b = np.array(list(im.getdata(2)))/255.0
    _r = _r.reshape((height, width))
    _g = _g.reshape((height, width))
    _b = _b.reshape((height, width))
    return _r, _g, _b
Utility function that converts an image file in 3 np arrays that can be fed into geo_image.GeoImage in order to generate a PyTROLL GeoImage object.
def defer(callable):
    """Run ``callable`` on a background thread and return its join function.

    Example:
        >>> def foo():
        ...     print('bar')
        >>> join = defer(foo)
        >>> join()
    """
    worker = threading.Thread(target=callable)
    worker.start()
    return worker.join
Defers execution of the callable to a thread. For example: >>> def foo(): ... print('bar') >>> join = defer(foo) >>> join()
def delete(args):
    """Delete the given jobs from the job manager.

    Jobs still executing on the grid are stopped first (unless running
    locally).
    """
    jm = setup(args)
    # Stop grid jobs before removing their bookkeeping entries.
    if not args.local and 'executing' in args.status:
        stop(args)
    jm.delete(job_ids=get_ids(args.job_ids),
              array_ids=get_ids(args.array_ids),
              delete_logs=not args.keep_logs,
              delete_log_dir=not args.keep_log_dir,
              status=args.status)
Deletes the jobs from the job manager. If the jobs are still running in the grid, they are stopped.
def extant_item(arg, arg_type):
    """Determine whether a parser argument is an existing file or directory.

    Args:
        arg: parser argument containing the filename/dirname to be checked
        arg_type: either "file" or "directory"

    Returns:
        ``arg`` unchanged when the file or directory exists.

    Raises:
        argparse.ArgumentError: when the file or directory does not exist.
    """
    checks = {"file": os.path.isfile, "directory": os.path.isdir}
    predicate = checks.get(arg_type)
    if predicate is not None:
        if predicate(arg):
            return arg
        raise argparse.ArgumentError(
            None,
            "The {kind} {arg} does not exist.".format(kind=arg_type, arg=arg))
Determine if parser argument is an existing file or directory. This technique comes from http://stackoverflow.com/a/11541450/95592 and from http://stackoverflow.com/a/11541495/95592 Args: arg: parser argument containing filename to be checked arg_type: string of either "file" or "directory" Returns: If the file exists, return the filename or directory. Raises: If the file does not exist, raise a parser error.
def search_document_cache_key(self):
    """Key under which this instance's search document is cached locally."""
    meta = self._meta
    return "elasticsearch_django:{}.{}.{}".format(
        meta.app_label, meta.model_name, self.pk)
Key used for storing search docs in local cache.
def shuffle_characters(s):
    """Return a copy of ``s`` with its characters randomly shuffled."""
    chars = list(s)
    random.shuffle(chars)
    return ''.join(chars)
Randomly shuffle the characters in a string
def text2sentences(text, labels):
    """Yield sentences of ``text``, split where ``labels`` predicts a boundary.

    A '1' label marks a boundary character, which is dropped; all other
    characters are accumulated into the current sentence.
    """
    sentence = ''
    for i, label in enumerate(labels):
        if label != '1':
            sentence += text[i]
            continue
        if sentence:
            yield sentence
        sentence = ''
    if sentence:
        yield sentence
Splits given text at predicted positions from `labels`
def get_connections(self, id, connection_name, **args):
    """Fetch the ``connection_name`` connections of the given object."""
    path = "{0}/{1}/{2}".format(self.version, id, connection_name)
    return self.request(path, args)
Fetches the connections for given object.
def prjs_view_prj(self, *args, **kwargs):
    """View the project currently selected in the projects table view.

    :returns: None
    :rtype: None
    :raises: None
    """
    i = self.prjs_tablev.currentIndex()
    item = i.internalPointer()
    # No selection -> currentIndex has no internal pointer; do nothing.
    if item:
        prj = item.internal_data()
        self.view_prj(prj)
View the, in the projects table view selected, project. :returns: None :rtype: None :raises: None
def next(self):
    """Pop and return the first outgoing message.

    If the list is currently empty, block the calling thread until at least
    one message is available.
    """
    outgoing_msg = self.outgoing_msg_list.pop_first()
    if outgoing_msg is None:
        # Nothing queued: clear the event and sleep until a producer sets it.
        self.outgoing_msg_event.clear()
        self.outgoing_msg_event.wait()
        outgoing_msg = self.outgoing_msg_list.pop_first()
    return outgoing_msg
Pops and returns the first outgoing message from the list. If message list currently has no messages, the calling thread will be put to sleep until we have at-least one message in the list that can be popped and returned.
def inverse_deriv(self, z):
    """Derivative of the inverse of the power transform.

    Parameters
    ----------
    z : array-like
        Usually the linear predictor of a GLM or GEE model.

    Returns
    -------
    array
        g^(-1)'(z), the derivative of the inverse power transform at ``z``.
    """
    exponent = (1 - self.power) / self.power
    return np.power(z, exponent) / self.power
Derivative of the inverse of the power transform Parameters ---------- z : array-like `z` is usually the linear predictor for a GLM or GEE model. Returns ------- g^(-1)'(z) : array The value of the derivative of the inverse of the power transform function
def copy_without_prompts(self):
    """Copy the selected text to the clipboard with '>>> '/'... ' prompts
    stripped from the start of each line."""
    text = self.get_selected_text()
    lines = text.split(os.linesep)
    for index, line in enumerate(lines):
        # Drop the 4-character interpreter prompt prefix.
        if line.startswith('>>> ') or line.startswith('... '):
            lines[index] = line[4:]
    text = os.linesep.join(lines)
    QApplication.clipboard().setText(text)
Copy text to clipboard without prompts
def plot(self):
    """Plot all basis functions over the full range of knots.

    Convenience function; requires matplotlib.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        from sys import stderr
        print("ERROR: matplotlib.pyplot not found, matplotlib must be installed to use this function", file=stderr)
        raise
    x_min = np.min(self.knot_vector)
    x_max = np.max(self.knot_vector)
    x = np.linspace(x_min, x_max, num=1000)
    # Evaluate the basis at every sample point; one row per basis function.
    N = np.array([self(i) for i in x]).T
    for n in N:
        plt.plot(x,n)
    return plt.show()
Plot basis functions over full range of knots. Convenience function. Requires matplotlib.
def _find_module_ptr(self, module_ptr):
    """Find the ModuleRef whose underlying pointer equals ``module_ptr``.

    Returns None when no registered module matches.
    """
    # Compare raw addresses, since distinct ctypes wrappers around the same
    # pointer do not compare equal directly.
    ptr = cast(module_ptr, c_void_p).value
    for module in self._modules:
        if cast(module._ptr, c_void_p).value == ptr:
            return module
    return None
Find the ModuleRef corresponding to the given pointer.
def _estimateCubicCurveLength(pt0, pt1, pt2, pt3, precision=10):
    """Estimate the length of a cubic curve by summing the segment lengths of
    a piecewise-linear approximation sampled at ``precision`` + 1 evenly
    spaced parameter values.
    """
    step = 1.0 / precision
    samples = [_getCubicPoint(i * step, pt0, pt1, pt2, pt3)
               for i in range(precision + 1)]
    return sum(_distance(a, b) for a, b in zip(samples, samples[1:]))
Estimate the length of this curve by iterating through it and averaging the length of the flat bits.
def write_to_screen(self, screen, mouse_handlers, write_position,
                    parent_style, erase_bg, z_index):
    " Fill the whole area of write_position with dots. "
    default_char = Char(' ', 'class:background')
    dot = Char('.', 'class:background')
    ypos = write_position.ypos
    xpos = write_position.xpos
    for y in range(ypos, ypos + write_position.height):
        row = screen.data_buffer[y]
        for x in range(xpos, xpos + write_position.width):
            # Draw a dot on every third diagonal; spaces elsewhere.
            row[x] = dot if (x + y) % 3 == 0 else default_char
Fill the whole area of write_position with dots.
def strip_suffix(id):
    """Split off any suffix from ID.

    This mimics the old behavior of the Sample ID.

    NOTE(review): ``re.split`` treats ``suffix`` as a regular expression, so a
    suffix containing regex metacharacters may split unexpectedly (consider
    ``re.escape``); the parameter also shadows the ``id`` builtin.
    """
    suffix = get_suffix(id)
    if not suffix:
        return id
    return re.split(suffix, id)[0]
Split off any suffix from ID This mimics the old behavior of the Sample ID.
def _credit_card_type(self, card_type=None):
    """Return a credit card type instance.

    Picks a random type when ``card_type`` is None; passes through instances
    that are already ``CreditCard`` objects; otherwise looks the name up in
    the registered types.
    """
    if card_type is None:
        card_type = self.random_element(self.credit_card_types.keys())
    elif isinstance(card_type, CreditCard):
        return card_type
    return self.credit_card_types[card_type]
Returns a random credit card type instance.
def get_collection(self, **kwargs):
    """Return a list of Python ``Resource`` objects for this collection.

    Refreshes the collection state first, then instantiates the concrete
    subclass registered for each item's ``kind``. Items without a ``kind``
    field are returned as raw dicts.

    :raises UnregisteredKind: when an item's kind has no registered class
    :returns: list of reference dicts and Python ``Resource`` objects
    """
    list_of_contents = []
    self.refresh(**kwargs)
    if 'items' in self.__dict__:
        for item in self.items:
            # 'kind'-less items are reference dicts; pass them through as-is.
            if 'kind' not in item:
                list_of_contents.append(item)
                continue
            kind = item['kind']
            if kind in self._meta_data['attribute_registry']:
                # Instantiate the registered type and hydrate it in place.
                instance = self._meta_data['attribute_registry'][kind](self)
                instance._local_update(item)
                instance._activate_URI(instance.selfLink)
                list_of_contents.append(instance)
            else:
                error_message = '%r is not registered!' % kind
                raise UnregisteredKind(error_message)
    return list_of_contents
Get an iterator of Python ``Resource`` objects that represent URIs. The returned objects are Pythonic `Resource`s that map to the most recently `refreshed` state of uris-resources published by the device. In order to instantiate the correct types, the concrete subclass must populate its registry with acceptable types, based on the `kind` field returned by the REST server. .. note:: This method implies a single REST transaction with the Collection subclass URI. :raises: UnregisteredKind :returns: list of reference dicts and Python ``Resource`` objects
def required_arguments(func):
    """Return all arguments of ``func`` that do not have a default value."""
    args = arguments_of(func)
    defaults = default_values_of(func)
    # Defaults always bind to the trailing parameters, so strip that many.
    return args[:-len(defaults)] if defaults else args
Return all arguments of a function that do not have a default value.
def create(cls, name, protocol_number, protocol_agent=None, comment=None):
    """Create the IP Service.

    :param str name: name of ip-service
    :param int protocol_number: ip proto number for this service
    :param str,ProtocolAgent protocol_agent: optional protocol agent for
        this service
    :param str comment: optional comment
    :raises CreateElementFailed: failure creating element with reason
    :return: instance with meta
    :rtype: IPService
    """
    json = {'name': name,
            'protocol_number': protocol_number,
            'protocol_agent_ref': element_resolver(protocol_agent) or None,
            'comment': comment}
    return ElementCreator(cls, json)
Create the IP Service :param str name: name of ip-service :param int protocol_number: ip proto number for this service :param str,ProtocolAgent protocol_agent: optional protocol agent for this service :param str comment: optional comment :raises CreateElementFailed: failure creating element with reason :return: instance with meta :rtype: IPService
def hget(self, key):
    """Read data from Redis for the provided key.

    Args:
        key (string): The key to read in Redis.

    Returns:
        (any): The response data from Redis, decoded to str when the client
        returned bytes; None when the key is absent.
    """
    data = self.r.hget(self.hash, key)
    # Decode the value we already fetched instead of issuing a second,
    # redundant (and potentially inconsistent) HGET round-trip as the
    # original code did.
    if data is not None and not isinstance(data, str):
        data = data.decode('utf-8')
    return data
Read data from Redis for the provided key. Args: key (string): The key to read in Redis. Returns: (any): The response data from Redis.
def initialize_directories(self, root_dir):
    """Create the pueue config directory under ``root_dir`` (or the user's
    home directory when ``root_dir`` is falsy)."""
    base = root_dir or os.path.expanduser('~')
    self.config_dir = os.path.join(base, '.config/pueue')
    if not os.path.exists(self.config_dir):
        os.makedirs(self.config_dir)
Create all directories needed for logs and configs.
def CheckRegistryKey(javaKey):
    """Look up the JavaHome path for ``javaKey`` in the Windows registry.

    Returns the JavaHome path string, or None when the registry cannot be
    read. NOTE(review): Python 2 / Windows only (``_winreg`` module and the
    ``except Exception, err`` syntax).
    """
    from _winreg import ConnectRegistry, HKEY_LOCAL_MACHINE, OpenKey, QueryValueEx
    path = None
    try:
        aReg = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
        rk = OpenKey(aReg, javaKey)
        for i in range(1024):
            currentVersion = QueryValueEx(rk, "CurrentVersion")
            if currentVersion != None:
                key = OpenKey(rk, currentVersion[0])
                if key != None:
                    path = QueryValueEx(key, "JavaHome")
                    return path[0]
    except Exception, err:
        WriteUcsWarning("Not able to access registry.")
        return None
Method checks for the java in the registry entries.
def reach_max_num(self):
    """Check whether the number of downloaded images reached the maximum.

    Returns:
        bool: True when the 'reach_max_num' signal is set, or when a positive
        ``max_num`` is configured and ``fetched_num`` has reached it.
    """
    if self.signal.get('reach_max_num'):
        return True
    return self.max_num > 0 and self.fetched_num >= self.max_num
Check if downloaded images reached max num. Returns: bool: if downloaded images reached max num.
def default_working_dir():
    """Get the default working directory for blockstackd.

    Resolves to ``~/.<virtual-chain-name>`` based on the active virtualchain
    hooks module.
    """
    # Imported lazily to avoid a circular import at module load time.
    import nameset.virtualchain_hooks as virtualchain_hooks
    return os.path.expanduser('~/.{}'.format(virtualchain_hooks.get_virtual_chain_name()))
Get the default configuration directory for blockstackd
def build_request_include(include, params):
    """Augment request parameters with an ``include`` query parameter.

    Args:
        include([Resource class]): resource classes to include, or None
        params(dict): optional request parameters to extend

    Returns:
        An updated or new dictionary of parameters extended with an include
        query parameter.
    """
    params = params or OrderedDict()
    if include is not None:
        params['include'] = ','.join(
            cls._resource_type() for cls in include)
    return params
Augment request parameters with includes. When one or all resources are requested an additional set of resources can be requested as part of the request. This function extends the given parameters for a request with a list of resource types passed in as a list of :class:`Resource` subclasses. Args: include([Resource class]): A list of resource classes to include params(dict): The (optional) dictionary of request parameters to extend Returns: An updated or new dictionary of parameters extended with an include query parameter.
def remove_leading_garbage_lines_from_reference_section(ref_sectn):
    """Strip leading blank lines and e-mail addresses from an extracted
    reference section, since they are not references.

    @param ref_sectn: (list) of strings - the reference section lines
    @return: (list) of strings - the section without the leading garbage
    NOTE(review): the ``ur''`` string prefix is Python-2-only syntax.
    """
    p_email = re.compile(ur'^\s*e\-?mail', re.UNICODE)
    # Pop from the front while the first line is blank or an e-mail line.
    while ref_sectn and (ref_sectn[0].isspace() or p_email.match(ref_sectn[0])):
        ref_sectn.pop(0)
    return ref_sectn
Sometimes, the first lines of the extracted references are completely blank or email addresses. These must be removed as they are not references. @param ref_sectn: (list) of strings - the reference section lines @return: (list) of strings - the reference section without leading blank lines or email addresses.
def __conn_listener(self, state):
    """Kazoo connection event listener.

    :param state: the new connection state (CONNECTED/SUSPENDED/LOST)
    """
    if state == KazooState.CONNECTED:
        self.__online = True
        if not self.__connected:
            # First successful connection for this client instance.
            self.__connected = True
            self._logger.info("Connected to ZooKeeper")
            self._queue.enqueue(self.on_first_connection)
        else:
            self._logger.warning("Re-connected to ZooKeeper")
            self._queue.enqueue(self.on_client_reconnection)
    elif state == KazooState.SUSPENDED:
        self._logger.warning("Connection suspended")
        self.__online = False
    elif state == KazooState.LOST:
        self.__online = False
        self.__connected = False
        # A LOST state after a requested stop is expected, not an error.
        if self.__stop:
            self._logger.info("Disconnected from ZooKeeper (requested)")
        else:
            self._logger.warning("Connection lost")
Connection event listener :param state: The new connection state
def get_pinned_version(ireq):
    """Get the pinned version of an InstallRequirement.

    An InstallRequirement is considered pinned if:

    - It is not editable
    - It has exactly one specifier
    - That specifier is "==" (or "===")
    - The version does not contain a wildcard

    Examples:
        django==1.8   # pinned
        django>1.8    # NOT pinned
        django~=1.8   # NOT pinned
        django==1.*   # NOT pinned

    Raises `TypeError` if the input is not a valid InstallRequirement, or
    `ValueError` if the InstallRequirement is not pinned.
    """
    try:
        specifier = ireq.specifier
    except AttributeError:
        raise TypeError("Expected InstallRequirement, not {}".format(
            type(ireq).__name__,
        ))
    if ireq.editable:
        raise ValueError("InstallRequirement is editable")
    if not specifier:
        raise ValueError("InstallRequirement has no version specification")
    specs = specifier._specs
    if len(specs) != 1:
        raise ValueError("InstallRequirement has multiple specifications")
    (spec,) = specs
    op, version = spec._spec
    if op not in ('==', '===') or version.endswith('.*'):
        raise ValueError("InstallRequirement not pinned (is {0!r})".format(
            op + version,
        ))
    return version
Get the pinned version of an InstallRequirement. An InstallRequirement is considered pinned if: - Is not editable - It has exactly one specifier - That specifier is "==" - The version does not contain a wildcard Examples: django==1.8 # pinned django>1.8 # NOT pinned django~=1.8 # NOT pinned django==1.* # NOT pinned Raises `TypeError` if the input is not a valid InstallRequirement, or `ValueError` if the InstallRequirement is not pinned.
def type_check(self, filename):
    """Request a typecheck of the current buffer (e.g. on save)."""
    self.log.debug('type_check: in')
    self.editor.clean_errors()
    request = {"typehint": "TypecheckFilesReq",
               "files": [self.editor.path()]}
    self.send_request(request)
Update type checking when user saves buffer.
def predict_from_design_matrix(self, design_matrix):
    """Predict signals from a design matrix using the fitted betas.

    :param design_matrix: design matrix from which to predict a signal,
        shape (n_regressors, n_samples).
    :type design_matrix: numpy array
    :returns: predicted signal(s), shape (n_signals, n_samples).
    :rtype: numpy array
    """
    assert hasattr(self, 'betas'), \
        'no betas found, please run regression before prediction'
    assert design_matrix.shape[0] == self.betas.shape[0], \
        'designmatrix needs to have the same number of regressors as the betas already calculated'
    betas = self.betas.astype(np.float32)
    return np.dot(betas.T, design_matrix.astype(np.float32))
predict_from_design_matrix predicts signals given a design matrix. :param design_matrix: design matrix from which to predict a signal. :type design_matrix: numpy array, (nr_samples x betas.shape) :returns: predicted signal(s) :rtype: numpy array (nr_signals x nr_samples)
def decr(name, value=1, rate=1, tags=None):
    """Decrement the metric ``name`` by ``value`` via the shared statsd client.

    >>> import statsdecor
    >>> statsdecor.decr('my.metric')
    """
    client().decr(name, value, rate, tags)
Decrement a metric by value. >>> import statsdecor >>> statsdecor.decr('my.metric')
def save(self, png_file):
    """Save the PNG representation of this object to disk.

    Parameters
    ----------
    png_file : path to file
        File to write the PNG bytes to.

    Notes
    -----
    Relies on ``_repr_png_`` for rendering, so fix issues there.
    """
    png_bytes = self._repr_png_()
    with open(png_file, 'wb') as handle:
        handle.write(png_bytes)
Save png to disk. Parameters ---------- png_file : path to file file to write to Notes ----- It relies on _repr_png_, so fix issues there.
def has(self, querypart_name, value=None):
    """Return True if the query part ``querypart_name`` is set, optionally
    also containing ``value``.

    For example, ``sql.has('where')`` checks whether a condition was used,
    and ``sql.has('where', 'id')`` checks whether it mentions ``id``.
    """
    querypart = self._queryparts.get(querypart_name)
    if not querypart or not querypart.is_set:
        return False
    if value:
        return querypart.has(value)
    return True
Returns True if `querypart_name` with `value` is set. For example you can check if you already used condition by `sql.has('where')`. If you want to check for more information, for example if that condition also contain ID, you can do this by `sql.has('where', 'id')`.
def get_next_invalid_day(self, timestamp):
    """Get the next day (midnight, LOCAL time) on which this timerange is
    not active.

    :param timestamp: time we compute from
    :type timestamp: int
    :return: timestamp of the next invalid day, or None
    :rtype: int | None
    """
    # Already on an invalid day: that day is the answer.
    if self.is_time_day_invalid(timestamp):
        return timestamp
    next_future_timerange_invalid = self.get_next_future_timerange_invalid(timestamp)
    # No future invalid range today: look at the whole day's window instead.
    if next_future_timerange_invalid is None:
        (start_time, end_time) = self.get_start_and_end_time(get_day(timestamp))
    else:
        (start_time, end_time) = self.get_start_and_end_time(timestamp)
    if next_future_timerange_invalid is not None:
        # Inside the active window: the current day contains the invalid slot.
        if start_time <= timestamp <= end_time:
            return get_day(timestamp)
        # Before the window starts: invalid time is on the window's start day;
        # otherwise it is the day after the window ends.
        if start_time >= timestamp:
            return get_day(start_time)
        else:
            return get_day(end_time + 1)
    return None
Get next day where timerange is not active :param timestamp: time we compute from :type timestamp: int :return: timestamp of the next invalid day (midnight) in LOCAL time. :rtype: int | None
def steadystate(A, max_iter=100):
    """Empirically determine the steady-state probabilities of a stochastic
    matrix.

    Raises ``A`` to a high power (so each row approaches the stationary
    distribution), collects the distinct rows, and returns their normalized
    sum.
    """
    P = np.linalg.matrix_power(A, max_iter)
    # Keep one representative of each (numerically) distinct row.
    v = []
    for i in range(len(P)):
        if not np.any([np.allclose(P[i], vi, ) for vi in v]):
            v.append(P[i])
    return normalize(np.sum(v, axis=0))
Empirically determine the steady state probabilities from a stochastic matrix
def open(self):
    """Reset the start time and byte offset; return self for chaining."""
    self.offset = 0
    self.startTime = datetime.datetime.now()
    return self
Reset time and counts.
def _on_connection_finished(self, result):
    """Callback invoked when a BLE connection attempt has finished.

    On failure, reports the timeout to the caller and releases the
    connecting slot; on success, records the connection context and kicks
    off GATT service probing.

    Args:
        result: raw result packet carrying success flag, return values and
            the per-connection context dict.
    """
    success, retval, context = self._parse_return(result)
    conn_id = context['connection_id']
    callback = context['callback']
    if success is False:
        callback(conn_id, self.id, False, 'Timeout opening connection')
        # Free a connecting slot under the lock shared with other attempts.
        with self.count_lock:
            self.connecting_count -= 1
        return
    handle = retval['handle']
    # Track the live connection by its BLE handle and move to 'preparing'.
    context['disconnect_handler'] = self._on_connection_failed
    context['connect_time'] = time.time()
    context['state'] = 'preparing'
    self._connections[handle] = context
    self.probe_services(handle, conn_id, self._probe_services_finished)
Callback when the connection attempt to a BLE device has finished This function if called when a new connection is successfully completed Args: event (BGAPIPacket): Connection event
def predict_proba(self, X):
    """Return probability estimates for the samples in ``X``.

    If the module's forward method returns multiple outputs as a tuple,
    only the first output is used; the rest are ignored. When the criterion
    is ``BCEWithLogitsLoss``, a sigmoid is applied to turn logits into
    probabilities.

    Parameters
    ----------
    X : input data compatible with skorch.dataset.Dataset

    Returns
    -------
    y_proba : numpy ndarray
    """
    y_probas = []
    bce_logits_loss = isinstance(
        self.criterion_, torch.nn.BCEWithLogitsLoss)
    for yp in self.forward_iter(X, training=False):
        # Multi-output modules: keep only the first output.
        yp = yp[0] if isinstance(yp, tuple) else yp
        if bce_logits_loss:
            # Criterion works on logits, so convert to probabilities here.
            yp = torch.sigmoid(yp)
        y_probas.append(to_numpy(yp))
    y_proba = np.concatenate(y_probas, 0)
    return y_proba
Where applicable, return probability estimates for samples. If the module's forward method returns multiple outputs as a tuple, it is assumed that the first output contains the relevant information and the other values are ignored. If all values are relevant, consider using :func:`~skorch.NeuralNet.forward` instead. Parameters ---------- X : input data, compatible with skorch.dataset.Dataset By default, you should be able to pass: * numpy arrays * torch tensors * pandas DataFrame or Series * scipy sparse CSR matrices * a dictionary of the former three * a list/tuple of the former three * a Dataset If this doesn't work with your data, you have to pass a ``Dataset`` that can deal with the data. Returns ------- y_proba : numpy ndarray
def kill_cursors(self, cursor_ids, address=None):
    """DEPRECATED - schedule a kill-cursors message for the given ids.

    The message is sent from a background thread rather than immediately.

    :Parameters:
      - `cursor_ids`: list of cursor ids to kill
      - `address` (optional): (host, port) pair of the cursor's server

    Raises :class:`TypeError` if `cursor_ids` is not a ``list``.
    """
    warnings.warn(
        "kill_cursors is deprecated.",
        DeprecationWarning,
        stacklevel=2)
    if not isinstance(cursor_ids, list):
        raise TypeError("cursor_ids must be a list")
    # Queued for the background thread that batches kill-cursors messages.
    self.__kill_cursors_queue.append((address, cursor_ids))
DEPRECATED - Send a kill cursors message soon with the given ids. Raises :class:`TypeError` if `cursor_ids` is not an instance of ``list``. :Parameters: - `cursor_ids`: list of cursor ids to kill - `address` (optional): (host, port) pair of the cursor's server. If it is not provided, the client attempts to close the cursor on the primary or standalone, or a mongos server. .. versionchanged:: 3.3 Deprecated. .. versionchanged:: 3.0 Now accepts an `address` argument. Schedules the cursors to be closed on a background thread instead of sending the message immediately.
def load(cls, path):
    """Load a SOM from a JSON file saved with this package.

    Parameters
    ----------
    path : str
        The path to the JSON file.

    Returns
    -------
    s : cls
        A som of the specified class, marked as trained.
    """
    data = json.load(open(path))
    weights = data['weights']
    weights = np.asarray(weights, dtype=np.float64)
    # Reconstruct with the *original* (pre-decay) learning-rate and
    # neighborhood parameters stored in the file.
    s = cls(data['num_neurons'],
            data['data_dimensionality'],
            data['params']['lr']['orig'],
            neighborhood=data['params']['infl']['orig'],
            valfunc=data['valfunc'],
            argfunc=data['argfunc'],
            lr_lambda=data['params']['lr']['factor'],
            nb_lambda=data['params']['nb']['factor'])
    s.weights = weights
    s.trained = True
    return s
Load a SOM from a JSON file saved with this package. Parameters ---------- path : str The path to the JSON file. Returns ------- s : cls A som of the specified class.
def get_uri(endpoint_context, request, uri_type):
    """Resolve and verify the redirect URI for a request.

    :param endpoint_context: server endpoint context holding the client DB
    :param request: the Authorization request
    :param uri_type: 'redirect_uri' or 'post_logout_redirect_uri'
    :raises ParameterError: when the URI is absent from the request and not
        uniquely registered for the client
    :return: the resolved redirect URI
    """
    if uri_type in request:
        # URI supplied in the request: validate it against registration.
        verify_uri(endpoint_context, request, uri_type)
        uri = request[uri_type]
    else:
        # Fall back to the client's registered URIs ('<uri_type>s' key).
        try:
            _specs = endpoint_context.cdb[
                str(request["client_id"])]["{}s".format(uri_type)]
        except KeyError:
            raise ParameterError(
                "Missing {} and none registered".format(uri_type))
        else:
            if len(_specs) > 1:
                # Ambiguous: cannot pick among several registered URIs.
                raise ParameterError(
                    "Missing {} and more than one registered".format(uri_type))
            else:
                uri = join_query(*_specs[0])
    return uri
verify that the redirect URI is reasonable :param endpoint_context: :param request: The Authorization request :param uri_type: 'redirect_uri' or 'post_logout_redirect_uri' :return: redirect_uri
def set_hparams_from_args(args):
    """Set hparams overrides from an unparsed command-line args list.

    Arguments of the form ``--hp_<name> <value>`` are collected and appended
    to ``FLAGS.hparams`` as ``name=value`` pairs; other flags are logged as
    unknown and skipped.
    """
    if not args:
        return
    hp_prefix = "--hp_"
    tf.logging.info("Found unparsed command-line arguments. Checking if any "
                    "start with %s and interpreting those as hparams "
                    "settings.", hp_prefix)
    pairs = []
    i = 0
    while i < len(args):
        arg = args[i]
        if arg.startswith(hp_prefix):
            # Flag and its value come as two consecutive tokens.
            pairs.append((arg[len(hp_prefix):], args[i+1]))
            i += 2
        else:
            tf.logging.warn("Found unknown flag: %s", arg)
            i += 1
    as_hparams = ",".join(["%s=%s" % (key, val) for key, val in pairs])
    if FLAGS.hparams:
        # Append to any overrides already present.
        as_hparams = "," + as_hparams
    FLAGS.hparams += as_hparams
Set hparams overrides from unparsed args list.
def metadata(self, key=None, database=None, table=None, fallback=True):
    """Look up metadata, cascading from the most specific level given.

    Levels are searched in order table -> database -> instance root.
    With ``fallback=False`` only the most specific level is consulted.
    With ``key=None`` the levels are merged into one dict (less
    specific levels override, matching the historical behavior).
    Returns None if a requested key is not found.
    """
    assert not (database is None and table is not None), \
        "Cannot call metadata() with table= specified but not database="
    databases = self._metadata.get("databases") or {}
    # Build the cascade, most specific level first.
    levels = []
    if table is not None:
        db_meta = databases.get(database) or {}
        tables = db_meta.get("tables") or {}
        levels.append(tables.get(table) or {})
    if database is not None:
        levels.append(databases.get(database) or {})
    levels.append(self._metadata)
    if not fallback:
        levels = levels[:1]
    if key is None:
        merged = {}
        for level in levels:
            merged.update(level)
        return merged
    for level in levels:
        if key in level:
            return level[key]
    return None
Looks up metadata, cascading backwards from specified level. Returns None if metadata value is not found.
def main(self, *args, **kwargs):
    """Catch all exceptions raised by the parent's main().

    On failure: report to Sentry when available, re-raise when not
    running on an interactive terminal, otherwise offer the GitHub
    issue-reporting flow.
    """
    try:
        return super().main(*args, **kwargs)
    except Exception:
        if HAS_SENTRY:
            self._handle_sentry()
        interactive = sys.stdin.isatty() and sys.stdout.isatty()
        if not interactive:
            raise
        self._handle_github()
Catch all exceptions.
def lease(self, items):
    """Add the given messages to lease management.

    Args:
        items(Sequence[LeaseRequest]): The items to lease.
    """
    # Hand the items to the leaser so their deadlines are managed.
    self._manager.leaser.add(items)
    # Leasing more items may exceed flow-control limits; let the
    # manager pause the consumer if necessary.
    self._manager.maybe_pause_consumer()
Add the given messages to lease management. Args: items(Sequence[LeaseRequest]): The items to lease.
def soft_error(self, message):
    """Same as error, without the dying in a fire part.

    Prints usage followed by the error message on stderr, but does
    not exit.
    """
    self.print_usage(sys.stderr)
    self._print_message(
        _('%(prog)s: error: %(message)s\n') % {'prog': self.prog,
                                               'message': message},
        sys.stderr)
Same as error, without the dying in a fire part.
def entryCheck(self, event = None, repair = True):
    """Ensure any INDEF entry is uppercase, before base class behavior."""
    # Normalize case so 'indef'/'Indef' become the canonical 'INDEF'.
    valupr = self.choice.get().upper()
    if valupr.strip() == 'INDEF':
        self.choice.set(valupr)
    # Delegate the remaining validation/repair to the base class.
    return EparOption.entryCheck(self, event, repair = repair)
Ensure any INDEF entry is uppercase, before base class behavior
def wrap(self, methodName, types, skip=2):
    """Create a message handler that invokes a wrapper method with the
    in-order message fields as parameters, skipping over the first
    ``skip`` fields, and parsed according to the ``types`` list.

    Returns a no-op handler when the wrapper has no such method.
    """
    method = getattr(self.wrapper, methodName, None)
    if not method:
        return lambda *args: None

    def convert(typ, field):
        # Empty fields parse as zero for the numeric types.
        if typ is str:
            return field
        if typ is int:
            return int(field or 0)
        if typ is float:
            return float(field or 0)
        return bool(int(field or 0))

    def handler(fields):
        try:
            method(*[convert(typ, field)
                     for typ, field in zip(types, fields[skip:])])
        except Exception:
            self.logger.exception(f'Error for {methodName}:')

    return handler
Create a message handler that invokes a wrapper method with the in-order message fields as parameters, skipping over the first ``skip`` fields, and parsed according to the ``types`` list.
def vequ(v1):
    """Make one double precision 3-dimensional vector equal to another.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vequ_c.html

    :param v1: 3-dimensional double precision vector.
    :return: 3-dimensional double precision vector set equal to v1.
    """
    # Marshal the input into a ctypes double array for CSPICE.
    v1 = stypes.toDoubleVector(v1)
    # Output buffer filled by vequ_c.
    vout = stypes.emptyDoubleVector(3)
    libspice.vequ_c(v1, vout)
    return stypes.cVectorToPython(vout)
Make one double precision 3-dimensional vector equal to another. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vequ_c.html :param v1: 3-dimensional double precision vector. :type v1: 3-Element Array of floats :return: 3-dimensional double precision vector set equal to vin. :rtype: 3-Element Array of floats
async def register(self):
    """Register library device id and get initial device list."""
    url = '{}/Sessions'.format(self.construct_url(API_URL))
    params = {'api_key': self._api_key}
    # Query the current sessions; None signals the request failed.
    reg = await self.api_request(url, params)
    if reg is None:
        self._registered = False
        _LOGGER.error('Unable to register emby client.')
    else:
        self._registered = True
        _LOGGER.info('Emby client registered!, Id: %s', self.unique_id)
        self._sessions = reg
        self.update_device_list(self._sessions)
    # Start the websocket listener regardless of registration outcome.
    asyncio.ensure_future(self.socket_connection(), loop=self._event_loop)
Register library device id and get initial device list.
def determine_device(kal_out):
    """Extract and return device from scan results.

    Returns the text after "Using device" from the last matching line
    in *kal_out*, or None when no such line is present.
    """
    # BUG FIX: the original `while device == ""` loop was redundant and
    # spun forever when the matched line had nothing after the marker
    # (device stayed ""). A single scan suffices; last match wins,
    # preserving the original overwrite behavior.
    device = None
    for line in kal_out.splitlines():
        if "Using device " in line:
            device = str(line.split(' ', 2)[-1])
    return device
Extract and return device from scan results.
def wrap(self, availWidth, availHeight):
    """Determine the rectangle this paragraph really needs.

    Returns (width, height) consumed within the available space; (0, 0)
    when there is no text to lay out.
    """
    # Remember the available space for later splitting/drawing.
    self.avWidth = availWidth
    self.avHeight = availHeight
    logger.debug("*** wrap (%f, %f)", availWidth, availHeight)
    if not self.text:
        # Nothing to lay out.
        logger.debug("*** wrap (%f, %f) needed", 0, 0)
        return 0, 0
    width = availWidth
    # Lay the text out; splitIndex records where a frame split would
    # have to occur given the available height.
    self.splitIndex = self.text.splitIntoLines(width, availHeight)
    # Claim the full available width; height comes from the laid-out text.
    self.width, self.height = availWidth, self.text.height
    logger.debug("*** wrap (%f, %f) needed, splitIndex %r", self.width, self.height, self.splitIndex)
    return self.width, self.height
Determine the rectangle this paragraph really needs.
def _http_get(self, url):
    """Make an HTTP GET request to the specified URL and return the response.

    Retries
    -------
    Up to ``self._http_retries`` retries are made for statuses
    408, 500, 502, 503 and 504.

    Returns
    -------
    An HTTP response, containing response headers and content.

    Exceptions
    ----------
    * PythonKCMeetupsBadResponse
    * PythonKCMeetupsMeetupDown
    * PythonKCMeetupsRateLimitExceeded
    """
    for try_number in range(self._http_retries + 1):
        response = requests.get(url, timeout=self._http_timeout)
        if response.status_code == 200:
            return response
        if (try_number >= self._http_retries or
                response.status_code not in (408, 500, 502, 503, 504)):
            if response.status_code >= 500:
                raise PythonKCMeetupsMeetupDown(response, response.content)
            if response.status_code == 400:
                # BUG FIX: the original raised the rate-limit exception
                # inside a bare `try/except: pass`, which swallowed it so
                # it could never reach the caller. Only JSON decoding is
                # guarded now.
                try:
                    data = json.loads(response.content)
                except ValueError:
                    data = None
                if isinstance(data, dict) and data.get('code') == 'limit':
                    raise PythonKCMeetupsRateLimitExceeded
            raise PythonKCMeetupsBadResponse(response, response.content)
Make an HTTP GET request to the specified URL and return the response. Retries ------- The constructor of this class takes an argument specifying the number of times to retry a GET. The statuses which are retried on are: 408, 500, 502, 503, and 504. Returns ------- An HTTP response, containing response headers and content. Exceptions ---------- * PythonKCMeetupsBadResponse * PythonKCMeetupsMeetupDown * PythonKCMeetupsRateLimitExceeded
def stop_running_tasks(self):
    """Terminate all the running tasks

    :return: None
    """
    # Stop each task, then forget them all at once.
    # (name-mangled attribute: resolves against the enclosing class)
    for task in self.__running_registry:
        task.stop()
    self.__running_registry.clear()
Terminate all the running tasks :return: None
def list_vmss(access_token, subscription_id, resource_group):
    """List VM Scale Sets in a resource group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.

    Returns:
        HTTP response. JSON body of a list of scale set model views.
    """
    endpoint = (get_rm_endpoint() +
                '/subscriptions/' + subscription_id +
                '/resourceGroups/' + resource_group +
                '/providers/Microsoft.Compute/virtualMachineScaleSets' +
                '?api-version=' + COMP_API)
    # Follow paging links until the full list is retrieved.
    return do_get_next(endpoint, access_token)
List VM Scale Sets in a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. Returns: HTTP response. JSON body of a list of scale set model views.
def to_indices(self, tokens):
    """Converts tokens to indices according to the vocabulary.

    Parameters
    ----------
    tokens : str or list of strs
        A source token or tokens to be converted.

    Returns
    -------
    int or list of ints
        A token index or a list of token indices according to the
        vocabulary; unknown tokens map to C.UNKNOWN_IDX.
    """
    single = not isinstance(tokens, list)
    if single:
        tokens = [tokens]
    lookup = self.token_to_idx
    indices = [lookup[token] if token in lookup else C.UNKNOWN_IDX
               for token in tokens]
    # A scalar input yields a scalar output.
    return indices[0] if single else indices
Converts tokens to indices according to the vocabulary. Parameters ---------- tokens : str or list of strs A source token or tokens to be converted. Returns ------- int or list of ints A token index or a list of token indices according to the vocabulary.
def is_autosomal(chrom):
    """Keep chromosomes that are a digit 1-22, or chr prefixed digit chr1-chr22"""
    # Plain integer-like labels pass immediately.
    try:
        int(chrom)
        return True
    except ValueError:
        pass
    # Strip common decorations ("chr" prefix, underscores, dashes) and
    # try again; anything still non-numeric is rejected.
    try:
        cleaned = str(chrom.lower().replace("chr", "").replace("_", "").replace("-", ""))
        int(cleaned)
        return True
    except ValueError:
        return False
Keep chromosomes that are a digit 1-22, or chr prefixed digit chr1-chr22
def next(self):
    """Returns the next element or raises ``StopIteration`` if stopped."""
    # Advance to the next sub-iterator once the current one has been
    # drawn from `repeats` times (round-robin over self.iterators).
    if self.r == self.repeats:
        # NOTE(review): "lenght" is presumably a typo for "length", but it
        # must match the attribute name assigned in __init__ -- confirm there.
        self.i = (self.i + 1) % self.lenght
        self.r = 0
    self.r += 1
    # When a stop was requested, mark ourselves stopped as soon as we
    # wrap back around to the first iterator.
    if self.stopping and self.i == 0 and self.r == 1:
        self.stopped = True
    if self.i == 0 and self.stopped:
        raise StopIteration
    else:
        iterator = self.iterators[self.i]
        return iterator.next()
Returns the next element or raises ``StopIteration`` if stopped.
def _get_hit(self, key): try: result = self.cache[key] self.hits += 1 self._ref_key(key) return result except KeyError: pass result = self.weakrefs[key] self.refhits += 1 self.cache[key] = result self._ref_key(key) return result
Try to do a value lookup from the existing cache entries.
def encode_collection(collection):
    """Encode a single collection."""
    tree = etree.Element('collection')
    # Simple scalar children first.
    for tag, value in (('source', collection.source),
                       ('date', collection.date),
                       ('key', collection.key)):
        etree.SubElement(tree, tag).text = value
    encode_infons(tree, collection.infons)
    # Then one child element per document.
    for doc in collection.documents:
        tree.append(encode_document(doc))
    return tree
Encode a single collection.
def retrieve_api_token(self):
    """Retrieve a fresh access token from AVS.

    Exchanges the stored refresh token for an access token via the
    OAuth2 token endpoint.  (Callers may wrap this in an expiring memo
    since the token is only valid for a limited time.)

    Returns:
        str -- The access token for communicating with AVS

    Raises:
        requests.HTTPError: if the token endpoint returns an error status.
    """
    payload = self.oauth2_manager.get_access_token_params(
        refresh_token=self.refresh_token
    )
    response = requests.post(
        self.oauth2_manager.access_token_url, json=payload
    )
    # Fail loudly on HTTP errors rather than parsing an error body.
    response.raise_for_status()
    # response.json() is the charset-aware equivalent of the original
    # json.loads(response.text).
    return response.json()['access_token']
Retrieve the access token from AVS. This function is memoized, so the value returned by the function will be remembered and returned by subsequent calls until the memo expires. This is because the access token lasts for one hour, then a new token needs to be requested. Decorators: helpers.expiring_memo Returns: str -- The access token for communicating with AVS
def http_basic_auth_login_required(func=None):
    """Decorator. Use it to specify a RPC method is available only to logged users.

    Works both bare (``@http_basic_auth_login_required``) and called
    (``@http_basic_auth_login_required()``).
    """
    # Build the predicate-setting decorator once.
    wrapper = auth.set_authentication_predicate(http_basic_auth_check_user, [auth.user_is_authenticated])
    # Used as a decorator factory (called with parentheses): return it.
    if func is None:
        return wrapper
    # Used as a bare decorator: apply it to the function immediately.
    return wrapper(func)
Decorator. Use it to specify a RPC method is available only to logged users
def nextComment(self, text, start=0):
    """Return the next comment found in text starting at start.

    Picks whichever of line comment, block comment or empty line
    matches earliest.  May return None when none of them match.
    """
    # Non-matches (None) are ranked at len(text) so any real match wins;
    # if all three are None, min() returns one of the Nones.
    m = min([self.lineComment(text, start), self.blockComment(text, start), self._emptylineregex.search(text, start)], key=lambda m: m.start(0) if m else len(text))
    return m
Return the next comment found in text starting at start.
def _snap_exec(commands):
    """Execute snap commands, retrying while snapd holds its lock.

    :param commands: List of command arguments passed to ``snap``
    :return: Integer exit code
    :raises CouldNotAcquireLockException: after SNAP_NO_LOCK_RETRY_COUNT
        failed attempts to obtain the snapd lock
    """
    assert type(commands) == list

    retry_count = 0
    return_code = None
    while return_code is None or return_code == SNAP_NO_LOCK:
        try:
            return_code = subprocess.check_call(['snap'] + commands,
                                                env=os.environ)
        except subprocess.CalledProcessError as e:
            retry_count += 1
            if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
                raise CouldNotAcquireLockException(
                    'Could not acquire lock after {} attempts'
                    .format(SNAP_NO_LOCK_RETRY_COUNT))
            return_code = e.returncode
            # BUG FIX: level='WARN' was previously passed to str.format()
            # (where it was silently ignored) instead of to log().
            log('Snap failed to acquire lock, trying again in {} seconds.'
                .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN')
            sleep(SNAP_NO_LOCK_RETRY_DELAY)

    return return_code
Execute snap commands.

:param commands: List of arguments to pass to the ``snap`` executable
:return: Integer exit code
def guest_get_info(self, userid):
    """Get the status of a virtual machine.

    :param str userid: the id of the virtual machine
    :returns: dict describing the guest (power state, memory, CPU
        usage) as produced by the underlying vmops.get_info()
    """
    action = "get info of guest '%s'" % userid
    # Wrap SDK base errors with context about the attempted action.
    with zvmutils.log_and_reraise_sdkbase_error(action):
        return self._vmops.get_info(userid)
Get the status of a virtual machine. :param str userid: the id of the virtual machine :returns: Dictionary contains: power_state: (str) the running state, one of on | off max_mem_kb: (int) the maximum memory in KBytes allowed mem_kb: (int) the memory in KBytes used by the instance num_cpu: (int) the number of virtual CPUs for the instance cpu_time_us: (int) the CPU time used in microseconds
def tdSensor(self):
    """Get the next sensor while iterating.

    :return: a dict with the keys: protocol, model, id, datatypes.
    """
    # Fixed-size C buffers filled in by the library's out-parameters.
    protocol = create_string_buffer(20)
    model = create_string_buffer(20)
    sid = c_int()
    datatypes = c_int()
    self._lib.tdSensor(protocol, sizeof(protocol), model, sizeof(model), byref(sid), byref(datatypes))
    # Decode the C buffers into native Python values.
    return {'protocol': self._to_str(protocol), 'model': self._to_str(model), 'id': sid.value, 'datatypes': datatypes.value}
Get the next sensor while iterating. :return: a dict with the keys: protocol, model, id, datatypes.
def _get_separated_values(self, secondary=False): series = self.secondary_series if secondary else self.series transposed = list(zip(*[serie.values for serie in series])) positive_vals = [ sum([val for val in vals if val is not None and val >= self.zero]) for vals in transposed ] negative_vals = [ sum([val for val in vals if val is not None and val < self.zero]) for vals in transposed ] return positive_vals, negative_vals
Separate values between positives and negatives stacked
def supervised_to_dict(dataset, text2self):
    """Turns a supervised dataset into a dataset with a feature dictionary.

    if text2self, then the features dictionary contains a "targets" key.
    else, the features dictionary contains "inputs" and "targets" keys.

    Args:
      dataset: a tf.data.Dataset
      text2self: a boolean
    Returns:
      a tf.data.Dataset
    """
    # text2self is constant, so branch once instead of per element.
    if text2self:
        to_features = lambda inputs, targets: {"targets": targets}
    else:
        to_features = lambda inputs, targets: {"inputs": inputs,
                                               "targets": targets}
    return dataset.map(to_features,
                       num_parallel_calls=tf.data.experimental.AUTOTUNE)
Turns a supervised dataset into a dataset with a feature dictionary. if text2self, then the features dictionary contains a "targets" key. else, the features dictionary contains "inputs" and "targets" keys. Args: dataset: a tf.data.Dataset text2self: a boolean Returns: a tf.data.Dataset
def decimal_precision(row):
    """Change the "precision" of values before writing to CSV.

    Each value is rounded to 3 decimal places; scientific-notation
    strings keep their exponent (3.123456e-25 -> 3.123e-25) and
    non-numeric values are passed through stringified.

    :param tuple row: Row of numbers to process
    :return: Processed row (tuple; may be a list if an error occurred)
    """
    try:
        row = list(row)
        for idx, value in enumerate(row):
            text = str(value)
            m = re.match(re_sci_notation, text)
            if m:
                # Round the mantissa only, keep sign digit and exponent.
                rounded = round(float(m.group(2)), 3)
                row[idx] = m.group(1) + str(rounded)[1:] + m.group(3)
            else:
                try:
                    row[idx] = round(float(text), 3)
                except (ValueError, TypeError):
                    row[idx] = text
        row = tuple(row)
    except Exception as e:
        print("Error: Unable to fix the precision of values. File size may be larger than normal, {}".format(e))
    return row
Change the "precision" of values before writing to CSV.

Each value is rounded to 3 decimal places.
ex: 300 -> 300
ex: 300.123456 -> 300.123
ex: 3.123456e-25 -> 3.123e-25

:param tuple row: Row of numbers to process
:return tuple row: Processed row
def requirements(self, requires):
    """Sets the requirements for the package.

    It will take either a valid path to a requirements file, a list of
    requirements, or a whitespace-separated string of requirement
    specifiers.  A falsy value clears both (unless a previously set
    requirements file still exists on disk).
    """
    if requires:
        if isinstance(requires, basestring) and \
                os.path.isfile(os.path.abspath(requires)):
            # A path to an existing requirements file.
            self._requirements_file = os.path.abspath(requires)
        else:
            # BUG FIX: the original tested isinstance(self._requirements,
            # basestring), so a string of specifiers was only split when
            # the *previous* value happened to be a string.
            if isinstance(requires, basestring):
                requires = requires.split()
            self._requirements_file = None
            self._requirements = requires
    else:
        # Keep a previously configured requirements file if it still
        # exists (guard added: isfile(None) would raise TypeError).
        if self._requirements_file and os.path.isfile(self._requirements_file):
            return
        self._requirements, self._requirements_file = None, None
Sets the requirements for the package. It will take either a valid path to a requirements file or a list of requirements.
def add_subgraph(self, sgraph):
    """Adds an subgraph object to the graph.

    It takes a subgraph object as its only argument and returns None.
    """
    if not isinstance(sgraph, (Subgraph, Cluster)):
        raise TypeError('add_subgraph() received a non subgraph class object:' + str(sgraph))
    # BUG FIX: dict.has_key() was removed in Python 3; use setdefault()
    # to create the per-name list on first use and append to it.
    self.obj_dict['subgraphs'].setdefault(sgraph.get_name(), []).append(sgraph.obj_dict)
    sgraph.set_sequence(self.get_next_sequence_number())
    sgraph.set_parent_graph(self.get_parent_graph())
Adds an subgraph object to the graph. It takes a subgraph object as its only argument and returns None.
def has_trivial_constructor(class_):
    """If the class has a public trivial constructor, return a reference
    to it; otherwise return None.
    """
    # Resolve aliases/typedefs to the underlying class declaration.
    class_ = class_traits.get_declaration(class_)
    trivial = find_trivial_constructor(class_)
    # Only a *public* trivial constructor counts; otherwise fall
    # through and return None implicitly.
    if trivial and trivial.access_type == 'public':
        return trivial
If the class has a public trivial constructor, this function returns a reference to it; otherwise it returns None.
def _reset_internal(self):
    """Resets the pose of the arm and grippers."""
    super()._reset_internal()
    # Restore the arm joints to the robot's initial configuration.
    self.sim.data.qpos[self._ref_joint_pos_indexes] = self.mujoco_robot.init_qpos

    # Each gripper (if present) is reset to its own initial pose.
    if self.has_gripper_right:
        self.sim.data.qpos[
            self._ref_joint_gripper_right_actuator_indexes
        ] = self.gripper_right.init_qpos

    if self.has_gripper_left:
        self.sim.data.qpos[
            self._ref_joint_gripper_left_actuator_indexes
        ] = self.gripper_left.init_qpos
Resets the pose of the arm and grippers.
def _make_indices(self, Ns): N_new = int(Ns * self.n_splits) test = [np.full(N_new, False) for i in range(self.n_splits)] for i in range(self.n_splits): test[i][np.arange(Ns * i, Ns * (i + 1))] = True train = [np.logical_not(test[i]) for i in range(self.n_splits)] test = [np.arange(N_new)[test[i]] for i in range(self.n_splits)] train = [np.arange(N_new)[train[i]] for i in range(self.n_splits)] cv = list(zip(train, test)) return cv
makes indices for cross validation