positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def start(cls, _init_logging=True):
    """
    Arrange for the subprocess to be started, if it is not already running.

    The parent process picks a UNIX socket path the child will use prior to
    fork, creates a socketpair used essentially as a semaphore, then blocks
    waiting for the child to indicate the UNIX socket is ready for use.

    :param bool _init_logging:
        For testing, if :data:`False`, don't initialize logging.
    """
    # Idempotent: a non-None worker_sock means the child was already started.
    if cls.worker_sock is not None:
        return

    # faulthandler may be None on interpreters lacking the module.
    if faulthandler is not None:
        faulthandler.enable()

    mitogen.utils.setup_gil()
    cls.unix_listener_path = mitogen.unix.make_socket_path()
    # Parent keeps worker_sock, child keeps child_sock; one byte received on
    # worker_sock (io_op below) acts as the readiness semaphore.
    cls.worker_sock, cls.child_sock = socket.socketpair()
    atexit.register(lambda: clean_shutdown(cls.worker_sock))
    # Keep both descriptors from leaking into any further children.
    mitogen.core.set_cloexec(cls.worker_sock.fileno())
    mitogen.core.set_cloexec(cls.child_sock.fileno())

    cls.profiling = os.environ.get('MITOGEN_PROFILING') is not None
    if cls.profiling:
        mitogen.core.enable_profiling()
    if _init_logging:
        ansible_mitogen.logging.setup()

    # Snapshot the pre-fork environment for later use.
    cls.original_env = dict(os.environ)
    cls.child_pid = os.fork()
    if cls.child_pid:
        # Parent (controller) path: close the child's end of the pair and
        # block until the child signals readiness with one byte.
        save_pid('controller')
        ansible_mitogen.logging.set_process_name('top')
        ansible_mitogen.affinity.policy.assign_controller()
        cls.child_sock.close()
        cls.child_sock = None
        mitogen.core.io_op(cls.worker_sock.recv, 1)
    else:
        # Child (mux) path: close the parent's end and enter the worker
        # loop (worker_main presumably does not return — TODO confirm).
        save_pid('mux')
        ansible_mitogen.logging.set_process_name('mux')
        ansible_mitogen.affinity.policy.assign_muxprocess()
        cls.worker_sock.close()
        cls.worker_sock = None
        self = cls()
        self.worker_main()
Arrange for the subprocess to be started, if it is not already running. The parent process picks a UNIX socket path the child will use prior to fork, creates a socketpair used essentially as a semaphore, then blocks waiting for the child to indicate the UNIX socket is ready for use. :param bool _init_logging: For testing, if :data:`False`, don't initialize logging.
def dbscan(points, eps, minpts):
    """Cluster (lat, lon) points with [DBSCAN]_ (*A density-based algorithm
    for discovering clusters in large spatial databases with noise*).

    Returns one label per input point; points belonging to no cluster keep
    the label ``None``.

    References
    ----------
    .. [DBSCAN] Ester, M., Kriegel, H. P., Sander, J., & Xu, X. (1996,
        August). A density-based algorithm for discovering clusters in large
        spatial databases with noise. In Kdd (Vol. 96, No. 34, pp. 226-231).
    """
    total = len(points)
    labels = [None] * total
    distances = compute_distance_matrix(points)
    adjacency = [get_neighbors(distances, idx, eps) for idx in range(total)]

    cluster_id = 0
    for seed in range(total):
        # Only an unlabeled core point may start a new cluster.
        if labels[seed] is not None or len(adjacency[seed]) < minpts:
            continue
        labels[seed] = cluster_id
        frontier = [seed]
        # Grow the cluster outward from every core point reached.
        while frontier:
            current = frontier.pop()
            for neighbor in adjacency[current]:
                if labels[neighbor] is not None:
                    continue
                labels[neighbor] = cluster_id
                if len(adjacency[neighbor]) >= minpts:
                    frontier.append(neighbor)
        cluster_id += 1
    return labels
Implementation of [DBSCAN]_ (*A density-based algorithm for discovering clusters in large spatial databases with noise*). It accepts a list of points (lat, lon) and returns the labels associated with the points. References ---------- .. [DBSCAN] Ester, M., Kriegel, H. P., Sander, J., & Xu, X. (1996, August). A density-based algorithm for discovering clusters in large spatial databases with noise. In Kdd (Vol. 96, No. 34, pp. 226-231).
def bread(stream):
    """Decode a bencoded file or stream to an object.

    :param stream: either an object exposing ``read()`` or a path to a file,
        which is opened in binary mode.
    :return: the decoded object produced by ``bdecode``.
    """
    if hasattr(stream, "read"):
        return bdecode(stream.read())
    # ``with`` guarantees the handle is closed even if bdecode raises,
    # replacing the original's manual try/finally.
    with open(stream, "rb") as handle:
        return bdecode(handle.read())
Decode a file or stream to an object.
def run(expnum, ccd, version, dry_run=False, prefix="", force=False, ignore_dependency=False):
    """Run the OSSOS mopheader script.

    :param expnum: exposure number to process
    :param ccd: CCD number within the exposure
    :param version: processing version tag (e.g. 'p')
    :param dry_run: when True, build the mopheader but push nothing to VOSpace
    :param prefix: optional filename prefix for the input image
    :param force: re-run even if this task already completed successfully
    :param ignore_dependency: skip checking that the dependency task has run
    :return: storage.SUCCESS on success, otherwise the recorded error message
    """
    message = storage.SUCCESS

    logging.info("Attempting to get status on header for {} {}".format(expnum, ccd))
    if storage.get_status(task, prefix, expnum, version, ccd) and not force:
        logging.info("{} completed successfully for {} {} {} {}".format(task, prefix, expnum, version, ccd))
        return message

    with storage.LoggingManager(task, prefix, expnum, ccd, version, dry_run):
        try:
            logging.info("Building a mopheader ")
            if not storage.get_status(dependency, prefix, expnum, "p", 36) and not ignore_dependency:
                raise IOError("{} not yet run for {}".format(dependency, expnum))

            # confirm destination directory exists.
            destdir = os.path.dirname(
                storage.dbimages_uri(expnum, ccd, prefix=prefix, version=version, ext='fits'))
            if not dry_run:
                storage.mkdir(destdir)

            # get image from the vospace storage area
            logging.info("Retrieving image from VOSpace")
            filename = storage.get_image(expnum, ccd, version=version, prefix=prefix)

            # launch the stepZjmp program
            logging.info("Launching stepZ on %s %d" % (expnum, ccd))
            expname = os.path.basename(filename)
            # BUGFIX: the original used str.strip('.fits'), which removes any
            # of the characters '.', 'f', 'i', 't', 's' from BOTH ends of the
            # name (e.g. 'still.fits' -> 'll'); trim the literal suffix only.
            if expname.endswith('.fits'):
                expname = expname[:-len('.fits')]
            logging.info(util.exec_prog(['stepZjmp', '-f', expname]))

            # if this is a dry run then we are finished
            if dry_run:
                return message

            # push the header to the VOSpace
            mopheader_filename = expname + ".mopheader"
            destination = storage.dbimages_uri(expnum, ccd, prefix=prefix, version=version, ext='mopheader')
            source = mopheader_filename
            count = 0
            # The open() both verifies the product exists and holds it open
            # for the duration of the transfer attempts.
            with open(source, 'r'):
                while True:
                    try:
                        count += 1
                        logging.info("Attempt {} to copy {} -> {}".format(count, source, destination))
                        storage.copy(source, destination)
                        break
                    except Exception as ex:
                        # Retry transient VOSpace failures up to 10 times.
                        if count > 10:
                            raise ex
            logging.info(message)
        except CalledProcessError as cpe:
            message = str(cpe.output)
            logging.error(message)
        except Exception as e:
            message = str(e)
            logging.error(message)
        if not dry_run:
            storage.set_status(task, prefix, expnum, version=version, ccd=ccd,
                               status=message)
    return message
Run the OSSOS mopheader script.
def __struct_params_s(obj, separator=', ', f=repr, fmt='%s = %s'):
    """Render every includable struct attribute of *obj* as one string.

    Each attribute accepted by ``__inc_param`` is formatted via
    ``__single_param`` and the pieces are joined with *separator*.
    """
    rendered = [
        __single_param(obj, name, f, fmt)
        for name in dir(obj)
        if __inc_param(obj, name)
    ]
    return separator.join(rendered)
method wrapper for printing all elements of a struct
def find_interfaces(device, **kwargs):
    """Collect every interface descriptor of *device* matching the criteria.

    :param device: a USB device whose configurations are iterated.
    :param kwargs: matching criteria forwarded to ``usb_find_desc``.
    :return: list of matching interface descriptors (possibly empty).
    """
    interfaces = []
    try:
        for cfg in device:
            try:
                interfaces.extend(usb_find_desc(cfg, find_all=True, **kwargs))
            # Narrowed from a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; behaviour is still best-effort.
            except Exception:
                pass
    except Exception:
        # An unreadable device yields whatever was collected so far.
        pass
    return interfaces
:param device: :return:
def storage_pools(self):
    """
    Returns a `list` of all the `StoragePool` objects known to the cluster.
    Updates every time - no caching.
    :return: a `list` of all the `StoragePool` objects known to the cluster.
    :rtype: list
    """
    self.connection._check_login()
    # Fetch every StoragePool instance from the REST API in one GET.
    response = self.connection._do_get("{}/{}".format(self.connection._api_url, "types/StoragePool/instances")).json()
    all_storage_pools = []
    for storage_pool_object in response:
        all_storage_pools.append(SIO_Storage_Pool.from_dict(storage_pool_object))
    return all_storage_pools
Returns a `list` of all the `StoragePool` objects known to the cluster. Updates every time - no caching. :return: a `list` of all the `StoragePool` objects known to the cluster. :rtype: list
def show_Certificate(cert, short=False):
    """
    Print Fingerprints, Issuer and Subject of an X509 Certificate.

    :param cert: X509 Certificate to print
    :param short: Print in shortform for DN (Default: False)

    :type cert: :class:`asn1crypto.x509.Certificate`
    :type short: Boolean
    """
    for algo, fingerprint in (("SHA1", cert.sha1_fingerprint),
                              ("SHA256", cert.sha256_fingerprint)):
        print("{} Fingerprint: {}".format(algo, fingerprint))
    for label, entity in (("Issuer", cert.issuer), ("Subject", cert.subject)):
        print("{}: {}".format(label, get_certificate_name_string(entity.native, short=short)))
Print Fingerprints, Issuer and Subject of an X509 Certificate. :param cert: X509 Certificate to print :param short: Print in shortform for DN (Default: False) :type cert: :class:`asn1crypto.x509.Certificate` :type short: Boolean
def _fill_with_zeros(partials, rows, zero=None):
    """Find and return values from rows for all partials.

    In cases where no row matches a partial, zero is assumed as value. For
    a row, the first (n-1) fields are assumed to be the partial, and the
    last field, the value, where n is the total number of fields in each row.
    It is assumed that there is a unique row for each partial.

    partials -- single field values or tuples of field values
    rows -- table rows
    zero -- value used when no rows match a particular partial
    """
    assert len(rows) > 0
    # NOTE(review): this tests the iterability of the *container* `partials`,
    # which for a list is always true, so the wrapping branch looks
    # unreachable; possibly `partials[0]` was intended — confirm upstream.
    if not _is_non_string_iterable(partials):
        # Convert partials to tuple for comparison against row slice later
        partials = [(partial,) for partial in partials]
    # Construct mapping of partials to values in rows
    mapping = {}
    for row in rows:
        mapping[tuple(row[:-1])] = row[-1]
    if zero is None:
        # Try to infer zero from given row values.
        array = np.array(tuple(mapping.values()))
        if len(array.shape) == 1:
            # Scalar values: the dtype's default (e.g. 0.0) is the zero.
            zero = array.dtype.type()
    return np.array([mapping.get(partial, zero) for partial in partials])
Find and return values from rows for all partials. In cases where no row matches a partial, zero is assumed as value. For a row, the first (n-1) fields are assumed to be the partial, and the last field, the value, where n is the total number of fields in each row. It is assumed that there is a unique row for each partial. partials -- single field values or tuples of field values rows -- table rows zero -- value used when no rows match a particular partial
def plot_movie(*args, **kwargs):
    """
    Generate a movie from received instances of World and show them.
    See also plot_movie_with_elegans and plot_movie_with_matplotlib.

    Parameters
    ----------
    worlds : list of World
        Worlds to render.
    interactive : bool, default False
        Choose a visualizer. If False, show the plot with matplotlib.
        If True (only available on IPython Notebook), show it with elegans.
    """
    # Default is the matplotlib (non-interactive) backend.
    interactive = kwargs.pop('interactive', False)
    if interactive:
        plot_movie_with_elegans(*args, **kwargs)
    else:
        plot_movie_with_matplotlib(*args, **kwargs)
Generate a movie from received instances of World and show them. See also plot_movie_with_elegans and plot_movie_with_matplotlib. Parameters ---------- worlds : list of World Worlds to render. interactive : bool, default False Choose a visualizer. If False, show the plot with matplotlib. If True (only available on IPython Notebook), show it with elegans.
async def _start_payloads(self): """Start all queued payloads""" with self._lock: for coroutine in self._payloads: task = self.event_loop.create_task(coroutine()) self._tasks.add(task) self._payloads.clear() await asyncio.sleep(0)
Start all queued payloads
def channels_voice_greeting_create(self, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/voice-api/greetings#create-greetings"
    # POST the greeting payload straight to the voice greetings endpoint.
    return self.call(
        "/api/v2/channels/voice/greetings.json",
        method="POST",
        data=data,
        **kwargs)
https://developer.zendesk.com/rest_api/docs/voice-api/greetings#create-greetings
def match(self, route):
    """
    Match input route and return new Message instance with parsed content
    """
    cleaned = trim_resource(self.resource)
    # Method comparisons elsewhere are case-insensitive; normalise in place.
    self.method = self.method.lower()
    found = route.resource_regex.search(cleaned)
    if found is None:
        return None
    # build params and querystring
    params = found.groupdict()
    raw_query = params.pop("querystring", "")
    self.param = params
    self.query = parse_querystring(raw_query)
    return copy.deepcopy(self)
Match input route and return new Message instance with parsed content
def check_whitelist_blacklist(value, whitelist=None, blacklist=None):
    '''
    Check ``value`` against an optional whitelist and/or blacklist.

    value
        The item to test.

    whitelist
        Patterns that, when matched, accept ``value`` (function returns
        ``True``).

    blacklist
        Patterns that, when matched, reject ``value`` (function returns
        ``False``).

    When both lists are supplied the blacklist is consulted first; a value
    absent from the blacklist must additionally match the whitelist to be
    accepted. With neither list supplied, every value passes.
    '''
    # Normalise each argument to a list, rejecting non-iterables.
    if not blacklist:
        blacklist = []
    else:
        if isinstance(blacklist, six.string_types):
            blacklist = [blacklist]
        if not hasattr(blacklist, '__iter__'):
            raise TypeError(
                'Expecting iterable blacklist, but got {0} ({1})'.format(
                    type(blacklist).__name__, blacklist
                )
            )

    if not whitelist:
        whitelist = []
    else:
        if isinstance(whitelist, six.string_types):
            whitelist = [whitelist]
        if not hasattr(whitelist, '__iter__'):
            raise TypeError(
                'Expecting iterable whitelist, but got {0} ({1})'.format(
                    type(whitelist).__name__, whitelist
                )
            )

    in_blacklist = any(expr_match(value, expr) for expr in blacklist)
    in_whitelist = any(expr_match(value, expr) for expr in whitelist)

    if blacklist and not whitelist:
        # Blacklist but no whitelist
        return not in_blacklist
    if whitelist and not blacklist:
        # Whitelist but no blacklist
        return in_whitelist
    if blacklist and whitelist:
        # Both whitelist and blacklist
        return not in_blacklist and in_whitelist
    # No blacklist or whitelist passed
    return True
Check a whitelist and/or blacklist to see if the value matches it. value The item to check the whitelist and/or blacklist against. whitelist The list of items that are white-listed. If ``value`` is found in the whitelist, then the function returns ``True``. Otherwise, it returns ``False``. blacklist The list of items that are black-listed. If ``value`` is found in the blacklist, then the function returns ``False``. Otherwise, it returns ``True``. If both a whitelist and a blacklist are provided, value membership in the blacklist will be examined first. If the value is not found in the blacklist, then the whitelist is checked. If the value isn't found in the whitelist, the function returns ``False``.
def _PathList_key(self, pathlist):
    """
    Returns the key for memoization of PathLists.

    Speed matters more than full canonicalization here: string forms like
    'dir1:$ROOT/dir2' are not split or massaged into Nodes, so two spellings
    of the same logical list may get distinct keys. Only the obvious case —
    re-use of exactly the same cloned sequence — is collapsed, by flattening
    it into a hashable tuple.
    """
    if not SCons.Util.is_Sequence(pathlist):
        return pathlist
    return tuple(SCons.Util.flatten(pathlist))
Returns the key for memoization of PathLists. Note that we want this to be pretty quick, so we don't completely canonicalize all forms of the same list. For example, 'dir1:$ROOT/dir2' and ['$ROOT/dir1', 'dir'] may logically represent the same list if you're executing from $ROOT, but we're not going to bother splitting strings into path elements, or massaging strings into Nodes, to identify that equivalence. We just want to eliminate obvious redundancy from the normal case of re-using exactly the same cloned value for a path.
def add_callback(self, cb):
    """
    Register cb as a new callback. Will not register duplicates.

    :param cb: callable to append to ``self.callbacks``; silently ignored
        if it is already registered.
    """
    # `not in` replaces the unidiomatic `(cb in ...) is False` test.
    if cb not in self.callbacks:
        self.callbacks.append(cb)
Register cb as a new callback. Will not register duplicates.
def probability_density(self, X):
    """Compute density function for given copula family.

    The formula appears to implement the bivariate Gumbel copula density
    in terms of the fitted parameter ``theta`` — TODO confirm against the
    family this class models.
    """
    self.check_fit()
    U, V = self.split_matrix(X)

    if self.theta == 1:
        # NOTE(review): theta == 1 is the independence limit, yet this
        # returns U*V (the independence CDF, not a density of 1) — verify
        # this is the intended convention for this library.
        return np.multiply(U, V)
    else:
        # a: 1 / (u * v)
        a = np.power(np.multiply(U, V), -1)
        # tmp: (-ln u)^theta + (-ln v)^theta
        tmp = np.power(-np.log(U), self.theta) + np.power(-np.log(V), self.theta)
        b = np.power(tmp, -2 + 2.0 / self.theta)
        c = np.power(np.multiply(np.log(U), np.log(V)), self.theta - 1)
        d = 1 + (self.theta - 1) * np.power(tmp, -1.0 / self.theta)
        # Density = C(u, v) * a * b * c * d, with C the copula CDF.
        return self.cumulative_distribution(X) * a * b * c * d
Compute density function for given copula family.
def reconnect(self):
    '''
    Try to reconnect and re-authenticate with the server.
    '''
    # Message fixed from 'SSH socket': the attribute closed here is the SSL
    # socket, as the sibling log line below already states.
    log.debug('Closing the SSL socket.')
    try:
        self.ssl_skt.close()
    except socket.error:
        log.error('The socket seems to be closed already.')
    log.debug('Re-opening the SSL socket.')
    # authenticate() re-establishes the connection and the session.
    self.authenticate()
Try to reconnect and re-authenticate with the server.
def add_label_work_count(self):
    """Adds to each result row a count of the number of works within the
    label that contain that n-gram.

    This counts works that have at least one witness carrying the n-gram.

    This correctly handles cases where an n-gram has only zero counts for a
    given work (possible with zero-fill followed by filtering by maximum
    count).
    """
    self._logger.info('Adding label work count')

    def add_label_text_count(df):
        # .any() per work is truthy iff some witness row has a non-zero
        # count, so summing the boolean column counts qualifying works.
        work_maxima = df.groupby(constants.WORK_FIELDNAME, sort=False).any()
        df.loc[:, constants.LABEL_WORK_COUNT_FIELDNAME] = work_maxima[
            constants.COUNT_FIELDNAME].sum()
        return df

    if self._matches.empty:
        # No matches: just materialise the column with a zero default.
        self._matches[constants.LABEL_WORK_COUNT_FIELDNAME] = 0
    else:
        self._matches.loc[:, constants.LABEL_WORK_COUNT_FIELDNAME] = 0
        # One count per (label, n-gram) group, broadcast to its rows.
        self._matches = self._matches.groupby(
            [constants.LABEL_FIELDNAME, constants.NGRAM_FIELDNAME],
            sort=False).apply(add_label_text_count)
    self._logger.info('Finished adding label work count')
Adds to each result row a count of the number of works within the label that contain that n-gram. This counts works that have at least one witness carrying the n-gram. This correctly handles cases where an n-gram has only zero counts for a given work (possible with zero-fill followed by filtering by maximum count).
def match(apikey, pcmiter, samplerate, duration, channels=2, metadata=None):
    """Fingerprint a PCM stream and look up its metadata.

    ``pcmiter`` must be an iterable of blocks of PCM data (buffers);
    ``duration`` is the total track length in seconds (an integer);
    ``metadata`` may carry existing tags for the file (optional keys:
    "artist", "album", and "title").

    Returns a list of track info dictionaries describing the candidate
    metadata returned by Last.fm. Raises a subclass of FingerprintError if
    any step fails.
    """
    # Pipeline: raw PCM -> fingerprint -> fingerprint id -> candidate tags.
    fingerprint = extract(pcmiter, samplerate, channels)
    fingerprint_id = fpid_query(duration, fingerprint, metadata)
    return metadata_query(fingerprint_id, apikey)
Given a PCM data stream, perform fingerprinting and look up the metadata for the audio. pcmiter must be an iterable of blocks of PCM data (buffers). duration is the total length of the track in seconds (an integer). metadata may be a dictionary containing existing metadata for the file (optional keys: "artist", "album", and "title"). Returns a list of track info dictionaries describing the candidate metadata returned by Last.fm. Raises a subclass of FingerprintError if any step fails.
def _min_depth(self): """Finds minimum path length from the root. Notes ----- Internal method. Do not call directly. Returns ------- int Minimum path length from the root. """ if "min_depth" in self.__dict__: return self.__dict__["min_depth"] min_depth = 0 hypernyms = self.hypernyms() if hypernyms: min_depth = 1 + min(h._min_depth() for h in hypernyms) self.__dict__["min_depth"] = min_depth return min_depth
Finds minimum path length from the root. Notes ----- Internal method. Do not call directly. Returns ------- int Minimum path length from the root.
def new(self, dev_t_high, dev_t_low):
    # type: (int, int) -> None
    '''
    Create a new Rock Ridge POSIX device number record.

    Parameters:
     dev_t_high - The high-order 32-bits of the device number.
     dev_t_low - The low-order 32-bits of the device number.
    Returns:
     Nothing.
    '''
    # Guard against double initialization of the record.
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('PN record already initialized!')

    self.dev_t_high, self.dev_t_low = dev_t_high, dev_t_low
    self._initialized = True
Create a new Rock Ridge POSIX device number record. Parameters: dev_t_high - The high-order 32-bits of the device number. dev_t_low - The low-order 32-bits of the device number. Returns: Nothing.
def inSignJoy(self):
    """ Returns if the object is in its sign of joy. """
    joy_sign = props.object.signJoy[self.obj.id]
    return joy_sign == self.obj.sign
Returns if the object is in its sign of joy.
def set_config_item(self, key, value):
    """
    Set a config key to a provided value.
    The value can be a list for the keys supporting multiple values.

    Returns True when the stored value matches what was requested,
    otherwise restores the previous value (or clears the key) and returns
    False.

    NOTE: Python 2 only — relies on ``str.decode()`` and the ``unicode``
    builtin.
    """
    try:
        old_value = self.get_config_item(key)
    except KeyError:
        # Key not present yet; nothing to restore on failure.
        old_value = None

    # Get everything to unicode with python2
    if isinstance(value, str):
        value = value.decode()
    elif isinstance(value, list):
        for i in range(len(value)):
            if isinstance(value[i], str):
                value[i] = value[i].decode()

    # Check if it's a list
    def set_key(key, value):
        # Replace the key entirely: clear, then set each entry in turn.
        self.clear_config_item(key)
        if isinstance(value, list):
            for entry in value:
                if not _lxc.Container.set_config_item(self, key, entry):
                    return False
        else:
            _lxc.Container.set_config_item(self, key, value)

    set_key(key, value)
    new_value = self.get_config_item(key)

    # loglevel is special and won't match the string we set
    if key == "lxc.loglevel":
        new_value = value

    # Accept scalar==scalar, list==list (order-insensitive), or a scalar
    # that lxc stored as a single-entry list.
    if (isinstance(value, unicode) and isinstance(new_value, unicode) and
            value == new_value):
        return True
    elif (isinstance(value, list) and isinstance(new_value, list) and
            set(value) == set(new_value)):
        return True
    elif (isinstance(value, unicode) and isinstance(new_value, list) and
            set([value]) == set(new_value)):
        return True
    elif old_value:
        # Mismatch: roll back to the previous value.
        set_key(key, old_value)
        return False
    else:
        # Mismatch and no previous value: remove the key entirely.
        self.clear_config_item(key)
        return False
Set a config key to a provided value. The value can be a list for the keys supporting multiple values.
def get(self, name):
    """Get the attribute with the given *name*.

    The returned object is a :class:`.Attribute` instance. Raises
    :exc:`ValueError` if no attribute has this name. Since multiple
    attributes can have the same name, we'll return the last match, since
    all but the last are ignored by the MediaWiki parser.
    """
    wanted = name.strip()
    # Walk backwards so the last (winning) duplicate is found first.
    for attribute in reversed(self.attributes):
        if attribute.name == wanted:
            return attribute
    raise ValueError(name)
Get the attribute with the given *name*. The returned object is a :class:`.Attribute` instance. Raises :exc:`ValueError` if no attribute has this name. Since multiple attributes can have the same name, we'll return the last match, since all but the last are ignored by the MediaWiki parser.
def extension_elements_to_elements(extension_elements, schemas):
    """Map extension elements onto classes from the given schemas.

    :param extension_elements: The list of extension elements
    :param schemas: Imported Python modules that represent the different
        known schemas used for the extension elements (a list, or a dict
        whose values are such modules)
    :return: A list with one native element per extension element that
        matched a class in some schema; elements with no match are dropped,
        and an unusable ``schemas`` argument yields an empty list.
    """
    converted = []
    if isinstance(schemas, dict):
        schemas = list(schemas.values())
    elif not isinstance(schemas, list):
        # Anything else cannot be iterated as schema modules.
        return converted

    for extension_element in extension_elements:
        # First schema that can decode the element wins.
        for schema in schemas:
            instance = extension_element_to_element(extension_element,
                                                    schema.ELEMENT_FROM_STRING,
                                                    schema.NAMESPACE)
            if instance:
                converted.append(instance)
                break
    return converted
Create a list of elements each one matching one of the given extension elements. This is of course dependent on the access to schemas that describe the extension elements. :param extension_elements: The list of extension elements :param schemas: Imported Python modules that represent the different known schemas used for the extension elements :return: A list of elements, representing the set of extension elements that was possible to match against a Class in the given schemas. The elements returned are the native representation of the elements according to the schemas.
def check_access_token(self, request_token):
    """Checks that the token contains only safe characters and is no
    shorter than lower and no longer than upper.
    """
    lower, upper = self.access_token_length
    # Every character must come from the safe alphabet...
    if not set(request_token) <= self.safe_characters:
        return False
    # ...and the length must fall inside the configured bounds.
    return lower <= len(request_token) <= upper
Checks that the token contains only safe characters and is no shorter than lower and no longer than upper.
def calculate_convolution_output_shapes(operator):
    '''
    Allowed input/output patterns are
        1. [N, C, H, W] ---> [N, C, H', W']

    Derives the output shape of a (de)convolution layer from the CoreML
    protobuf parameters and writes it into ``operator.outputs[0].type.shape``.
    '''
    check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)

    params = operator.raw_operator.convolution

    input_shape = operator.inputs[0].type.shape
    operator.outputs[0].type.shape = [0, 0, 0, 0]  # Initialize output shape. It will be modified below.
    output_shape = operator.outputs[0].type.shape

    # Adjust N-axis
    output_shape[0] = input_shape[0]

    # Adjust C-axis
    output_shape[1] = params.outputChannels

    # Set up default and non-default parameters
    dilations = [1, 1]
    if len(params.dilationFactor) > 0:
        dilations = [params.dilationFactor[0], params.dilationFactor[1]]
    kernel_shape = [3, 3]
    if len(params.kernelSize) > 0:
        kernel_shape = params.kernelSize
    strides = [1, 1]
    if len(params.stride) > 0:
        strides = params.stride
    specified_output_shape = [0, 0]  # Only used with convolution transpose
    if params.isDeconvolution and len(params.outputShape) > 0:
        specified_output_shape = list(int(i) for i in params.outputShape)
    pad_mode = params.WhichOneof('ConvolutionPaddingType')
    if pad_mode == 'valid' and len(params.valid.paddingAmounts.borderAmounts) > 0:
        # Explicit per-edge padding amounts for 'valid' mode: index 0 is the
        # H border, index 1 the W border.
        pad_amounts = params.valid.paddingAmounts.borderAmounts
        pad_heads = [pad_amounts[0].startEdgeSize, pad_amounts[1].startEdgeSize]
        pad_tails = [pad_amounts[0].endEdgeSize, pad_amounts[1].endEdgeSize]
    else:
        # Padding amounts are useless for same padding and valid padding uses [0, 0] by default.
        pad_heads = [0, 0]
        pad_tails = [0, 0]

    # Adjust H- and W-axes (i == 0 -> H, i == 1 -> W)
    for i in range(2):
        if params.isDeconvolution:
            output_shape[i + 2] = calculate_convolution_transpose_1D_output_shape(
                input_shape[i + 2], kernel_shape[i], dilations[i], strides[i],
                pad_mode, pad_heads[i], pad_tails[i], specified_output_shape[i])
        else:
            output_shape[i + 2] = calculate_convolution_and_pooling_1D_output_shape(
                input_shape[i + 2], kernel_shape[i], dilations[i], strides[i],
                pad_mode, pad_heads[i], pad_tails[i])
Allowed input/output patterns are 1. [N, C, H, W] ---> [N, C, H', W']
def run(self):
    """Main logic for this thread to execute.

    Prompts the user on the console and delivers the typed line to
    ``self._callback``, polling stdin so the thread can honour
    ``self._stop_event``.
    """
    if platform.system() == 'Windows':
        # Windows doesn't support file-like objects for select(), so fall back
        # to raw_input().
        response = input(''.join((self._message, os.linesep, PROMPT)))
        self._answered = True
        self._callback(response)
        return

    # First, display the prompt to the console.
    console_output.cli_print(self._message, color=self._color,
                             end=os.linesep, logger=None)
    console_output.cli_print(PROMPT, color=self._color, end='', logger=None)
    sys.stdout.flush()

    # Before reading, clear any lingering buffered terminal input.
    termios.tcflush(sys.stdin, termios.TCIFLUSH)

    line = ''
    while not self._stop_event.is_set():
        # Poll with a short timeout so the stop event is checked regularly.
        inputs, _, _ = select.select([sys.stdin], [], [], 0.001)
        if sys.stdin in inputs:
            new = os.read(sys.stdin.fileno(), 1024)
            if not new:
                # Hit EOF!
                # They hit ^D (to insert EOF). Tell them to hit ^C if they
                # want to actually quit.
                print('Hit ^C (Ctrl+c) to exit.')
                break
            line += new.decode('utf-8')
            if '\n' in line:
                # Deliver only the first line; anything typed after the
                # newline is discarded.
                response = line[:line.find('\n')]
                self._answered = True
                self._callback(response)
                return
Main logic for this thread to execute.
def centerdistance(image, voxelspacing = None, mask = slice(None)):
    r"""
    Takes a simple or multi-spectral image and returns its voxel-wise center
    distance in mm. A multi-spectral image must be supplied as a list or
    tuple of its spectra.

    Optionally a binary mask can be supplied to select the voxels for which
    the feature should be extracted.

    The center distance is the exact euclidean distance in mm of each voxels
    center to the central point of the overal image volume.

    Note that this feature is independent of the actual image content, but
    depends solely on its shape. Therefore always a one-dimensional feature
    is returned, even if a multi-spectral image has been supplied.

    Parameters
    ----------
    image : array_like or list/tuple of array_like
        A single image or a list/tuple of images (for multi-spectral case).
    voxelspacing : sequence of floats
        The side-length of each voxel.
    mask : array_like
        A binary mask for the image.

    Returns
    -------
    centerdistance : ndarray
        The distance of each voxel to the images center.

    See Also
    --------
    centerdistance_xdminus1
    """
    # The feature depends only on shape, so any one spectrum is
    # representative; isinstance() replaces the original's exact-type
    # checks and also accepts tuple/list subclasses.
    if isinstance(image, (tuple, list)):
        image = image[0]

    return _extract_feature(_extract_centerdistance, image, mask,
                            voxelspacing=voxelspacing)
r""" Takes a simple or multi-spectral image and returns its voxel-wise center distance in mm. A multi-spectral image must be supplied as a list or tuple of its spectra. Optionally a binary mask can be supplied to select the voxels for which the feature should be extracted. The center distance is the exact euclidean distance in mm of each voxels center to the central point of the overal image volume. Note that this feature is independent of the actual image content, but depends solely on its shape. Therefore always a one-dimensional feature is returned, even if a multi-spectral image has been supplied. Parameters ---------- image : array_like or list/tuple of array_like A single image or a list/tuple of images (for multi-spectral case). voxelspacing : sequence of floats The side-length of each voxel. mask : array_like A binary mask for the image. Returns ------- centerdistance : ndarray The distance of each voxel to the images center. See Also -------- centerdistance_xdminus1
def receive(self, sock):
    """Receive a message on ``sock``.

    Reads one length-prefixed frame: the first four bytes are a big-endian
    unsigned int giving the payload size, followed by a pickled payload.

    :param sock: connected socket-like object exposing ``recv``.
    :return: the unpickled message object.
    :raises Exception: if the peer closes the connection mid-frame.
    """
    msg = None
    data = b''
    recv_done = False
    recv_len = -1
    while not recv_done:
        buf = sock.recv(BUFSIZE)
        if buf is None or len(buf) == 0:
            raise Exception("socket closed")
        if recv_len == -1:
            # First chunk: peel off the 4-byte length header.
            # NOTE(review): assumes the first recv() returns at least 4
            # bytes; a shorter first read would break the unpack — confirm.
            recv_len = struct.unpack('>I', buf[:4])[0]
            data += buf[4:]
            recv_len -= len(data)
        else:
            data += buf
            recv_len -= len(buf)
        recv_done = (recv_len == 0)
    # SECURITY NOTE(review): pickle.loads on bytes received from the wire
    # is unsafe unless the peer is fully trusted.
    msg = pickle.loads(data)
    return msg
Receive a message on ``sock``.
def isMember(userid, password, group):
    """Test to see if the given userid/password combo is an authenticated
    member of group.

    userid: CADC Username (str)
    password: CADC Password (str)
    group: CADC GMS group (str)

    Returns True only when the membership endpoint answers 200; any error
    (network, auth, certificate) is logged and treated as "not a member".

    NOTE: Python 2 only — uses the ``httplib`` module.
    """
    try:
        # A proxy certificate is fetched first and then used as both the
        # TLS key and certificate for the GMS query.
        certfile = getCert(userid, password)
        group_url = getGroupsURL(certfile, group)
        logging.debug("group url: %s" % ( group_url))
        con = httplib.HTTPSConnection(_SERVER, 443,
                                      key_file=certfile.name,
                                      cert_file=certfile.name,
                                      timeout=600)
        con.connect()
        con.request("GET", group_url)
        resp = con.getresponse()
        if resp.status == 200:
            return True
    except Exception as e:
        logging.error(str(e))
    #logging.debug(str(resp.status))
    return False
Test to see if the given userid/password combo is an authenticated member of group. userid: CADC Username (str) password: CADC Password (str) group: CADC GMS group (str)
def flatten(self, obj):
    """Return a list with the field values """
    serialized = []
    # Serialize *obj* once per configured field, preserving field order.
    for field in self.fields:
        serialized.append(self._serialize(field, obj))
    return serialized
Return a list with the field values
def nearby(word):
    '''
    Nearby word

    Return ``(words, scores)`` for the distributional neighbours of *word*,
    caching results per (unicode-normalised) word. Unknown (OOV) words
    yield two empty lists.
    '''
    w = any2unicode(word)
    # read from cache
    if w in _cache_nearby:
        return _cache_nearby[w]

    words, scores = [], []
    try:
        for neighbour, score in _vectors.neighbours(w):
            words.append(neighbour)
            scores.append(score)
    except KeyError:
        # Narrowed from a bare ``except:`` per the original comment's
        # intent: only swallow the OOV KeyError, not e.g. KeyboardInterrupt.
        pass
    # put into cache
    _cache_nearby[w] = (words, scores)
    return words, scores
Nearby word
def kitchen_delete(backend, kitchen):
    """
    Provide the name of the kitchen to delete
    """
    click.secho('%s - Deleting kitchen %s' % (get_datetime(), kitchen), fg='green')
    master = 'master'
    # Guard clause: the master kitchen must never be deleted.
    if kitchen.lower() == master.lower():
        raise click.ClickException('Cannot delete the kitchen called %s' % master)
    check_and_print(DKCloudCommandRunner.delete_kitchen(backend.dki, kitchen))
Provide the name of the kitchen to delete
def searchsorted(self, value, side="left", sorter=None):
    """
    Find indices where elements should be inserted to maintain order.

    .. versionadded:: 0.24.0

    Assuming ``self`` is sorted, returns for each element of `value` the
    index ``i`` such that inserting the element before ``i`` keeps
    ``self`` sorted (``side='left'`` gives the first suitable position,
    ``side='right'`` the last).

    Parameters
    ----------
    value : array_like
        Values to insert into `self`.
    side : {'left', 'right'}, optional
        Which end of any run of equal values to return.
    sorter : 1-D array_like, optional
        Optional array of integer indices that sort `self` into ascending
        order (typically the result of ``argsort``).

    Returns
    -------
    array of ints
        Array of insertion points with the same shape as `value`.

    See Also
    --------
    numpy.searchsorted : Similar method from NumPy.
    """
    # Delegate to the object-dtype ndarray implementation.  Note: only the
    # basics are exercised by the base pandas tests (no out-of-range
    # values, no in-between values, no missing values).
    return self.astype(object).searchsorted(value, side=side, sorter=sorter)
Find indices where elements should be inserted to maintain order. .. versionadded:: 0.24.0 Find the indices into a sorted array `self` (a) such that, if the corresponding elements in `value` were inserted before the indices, the order of `self` would be preserved. Assuming that `self` is sorted: ====== ================================ `side` returned index `i` satisfies ====== ================================ left ``self[i-1] < value <= self[i]`` right ``self[i-1] <= value < self[i]`` ====== ================================ Parameters ---------- value : array_like Values to insert into `self`. side : {'left', 'right'}, optional If 'left', the index of the first suitable location found is given. If 'right', return the last such index. If there is no suitable index, return either 0 or N (where N is the length of `self`). sorter : 1-D array_like, optional Optional array of integer indices that sort array a into ascending order. They are typically the result of argsort. Returns ------- array of ints Array of insertion points with the same shape as `value`. See Also -------- numpy.searchsorted : Similar method from NumPy.
def i2c_read_request(self, address, register, number_of_bytes, read_type,
                     cb=None, cb_type=None):
    """
    Issue an i2c read request for a single read, continuous read or a
    stop, as selected by ``read_type``.

    Because different i2c devices return data at different rates, if no
    callback is specified the user must call this method and then call
    i2c_read_data after waiting long enough for the device to respond.
    Some devices require transmission to be restarted (e.g. MMA8452Q
    accelerometer); OR in Constants.I2C_RESTART_TX for those cases.

    :param address: i2c device
    :param register: i2c register number
    :param number_of_bytes: number of bytes to be returned
    :param read_type: Constants.I2C_READ, Constants.I2C_READ_CONTINUOUSLY
                      or Constants.I2C_STOP_READING
                      (Constants.I2C_RESTART_TX may be OR'ed in)
    :param cb: optional callback reference
    :param cb_type: Constants.CB_TYPE_DIRECT or Constants.CB_TYPE_ASYNCIO
    :returns: No return value
    """
    coro = self.core.i2c_read_request(address, register, number_of_bytes,
                                      read_type, cb, cb_type)
    # Wrap the coroutine in a Task and run it to completion on our loop.
    self.loop.run_until_complete(asyncio.ensure_future(coro))
This method issues an i2c read request for a single read,continuous read or a stop, specified by the read_type. Because different i2c devices return data at different rates, if a callback is not specified, the user must first call this method and then call i2c_read_data after waiting for sufficient time for the i2c device to respond. Some devices require that transmission be restarted (e.g. MMA8452Q accelerometer). Use I2C_READ | I2C_RESTART_TX for those cases. :param address: i2c device :param register: i2c register number :param number_of_bytes: number of bytes to be returned :param read_type: Constants.I2C_READ, Constants.I2C_READ_CONTINUOUSLY or Constants.I2C_STOP_READING. Constants.I2C_RESTART_TX may be OR'ed when required :param cb: optional callback reference :param cb_type: Constants.CB_TYPE_DIRECT = direct call or Constants.CB_TYPE_ASYNCIO = asyncio coroutine :returns: No return value
def get_elementary_deformations(cryst, n=5, d=2): '''Generate elementary deformations for elastic tensor calculation. The deformations are created based on the symmetry of the crystal and are limited to the non-equivalet axes of the crystal. :param cryst: Atoms object, basic structure :param n: integer, number of deformations per non-equivalent axis :param d: float, size of the maximum deformation in percent and degrees :returns: list of deformed structures ''' # Deformation look-up table # Perhaps the number of deformations for trigonal # system could be reduced to [0,3] but better safe then sorry deform = { "Cubic": [[0, 3], regular], "Hexagonal": [[0, 2, 3, 5], hexagonal], "Trigonal": [[0, 1, 2, 3, 4, 5], trigonal], "Tetragonal": [[0, 2, 3, 5], tetragonal], "Orthorombic": [[0, 1, 2, 3, 4, 5], orthorombic], "Monoclinic": [[0, 1, 2, 3, 4, 5], monoclinic], "Triclinic": [[0, 1, 2, 3, 4, 5], triclinic] } lattyp, brav, sg_name, sg_nr = get_lattice_type(cryst) # Decide which deformations should be used axis, symm = deform[brav] systems = [] for a in axis: if a < 3: # tetragonal deformation for dx in linspace(-d, d, n): systems.append( get_cart_deformed_cell(cryst, axis=a, size=dx)) elif a < 6: # sheer deformation (skip the zero angle) for dx in linspace(d/10.0, d, n): systems.append( get_cart_deformed_cell(cryst, axis=a, size=dx)) return systems
Generate elementary deformations for elastic tensor calculation. The deformations are created based on the symmetry of the crystal and are limited to the non-equivalent axes of the crystal. :param cryst: Atoms object, basic structure :param n: integer, number of deformations per non-equivalent axis :param d: float, size of the maximum deformation in percent and degrees :returns: list of deformed structures
def _proxy(self):
    """
    Lazily build (and cache) the instance context through which all
    instance actions are proxied.

    :returns: WorkflowCumulativeStatisticsContext for this WorkflowCumulativeStatisticsInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsContext
    """
    if self._context is not None:
        return self._context
    self._context = WorkflowCumulativeStatisticsContext(
        self._version,
        workspace_sid=self._solution['workspace_sid'],
        workflow_sid=self._solution['workflow_sid'],
    )
    return self._context
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: WorkflowCumulativeStatisticsContext for this WorkflowCumulativeStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsContext
def _flush_pending_events(self):
    '''Send the leading run of Frame objects queued in the event queue.'''
    queue = self._pending_events
    # Stop as soon as the queue is empty or its head is not a Frame.
    while queue and isinstance(queue[0], Frame):
        self._connection.send_frame(queue.popleft())
Send pending frames that are in the event queue.
def order_dict_by(dict_, key_order):
    r"""
    Reorder the items of ``dict_`` so that keys listed in ``key_order``
    come first (in that order), followed by any remaining keys.

    Args:
        dict_ (dict_): a dictionary
        key_order (list): custom key order

    Returns:
        OrderedDict: sorted_dict

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> sorted_dict = order_dict_by({1: 1, 2: 2, 3: 3, 4: 4}, [4, 2, 3, 1])
        >>> assert list(sorted_dict.keys()) == [4, 2, 3, 1]
    """
    present = set(dict_)
    leftovers = present - set(key_order)
    sorted_dict = OrderedDict()
    # Keys requested but absent from dict_ are silently skipped.
    for key in it.chain(key_order, leftovers):
        if key in present:
            sorted_dict[key] = dict_[key]
    return sorted_dict
r""" Reorders items in a dictionary according to a custom key order Args: dict_ (dict_): a dictionary key_order (list): custom key order Returns: OrderedDict: sorted_dict CommandLine: python -m utool.util_dict --exec-order_dict_by Example: >>> # ENABLE_DOCTEST >>> from utool.util_dict import * # NOQA >>> import utool as ut >>> dict_ = {1: 1, 2: 2, 3: 3, 4: 4} >>> key_order = [4, 2, 3, 1] >>> sorted_dict = order_dict_by(dict_, key_order) >>> result = ('sorted_dict = %s' % (ut.repr4(sorted_dict, nl=False),)) >>> print(result) >>> assert result == 'sorted_dict = {4: 4, 2: 2, 3: 3, 1: 1}'
def fill_rect(framebuf, x, y, width, height, color):
    """Draw a filled rectangle at the given location, size and color.

    ``fill_rect`` paints both the outline and interior.  The buffer uses a
    vertical-byte layout: each byte holds 8 vertically stacked pixels.
    """
    # pylint: disable=too-many-arguments
    bit = 1 if color != 0 else 0
    for row in range(y, y + height):
        base = (row >> 3) * framebuf.stride + x
        shift = row & 0x07
        mask = ~(0x01 << shift)
        for col in range(width):
            framebuf.buf[base + col] = \
                (framebuf.buf[base + col] & mask) | (bit << shift)
Draw a rectangle at the given location, size and color. The ``fill_rect`` method draws both the outline and interior.
def head(self, url, *args, **kwargs):
    """
    Send a HEAD request without checking the response.

    ``_check_response`` is intentionally not called: a HEAD response
    carries no body to check.

    :param str url: The URL to make the request to.
    """
    with LOG_JWS_HEAD().context():
        deferred = self._send_request(u'HEAD', url, *args, **kwargs)
        return DeferredContext(deferred).addActionFinish()
Send HEAD request without checking the response. Note that ``_check_response`` is not called, as there will be no response body to check. :param str url: The URL to make the request to.
def draw_latent_variables(self, nsims=5000):
    """ Draws latent variables from the model (for Bayesian inference)

    Parameters
    ----------
    nsims : int
        How many draws to take

    Returns
    ----------
    - np.ndarray of draws, one row per latent variable

    Raises
    ----------
    Exception if the model was not estimated, or was estimated by a
    non-Bayesian method.
    """
    if self.latent_variables.estimation_method is None:
        raise Exception("No latent variables estimated!")
    elif self.latent_variables.estimation_method == 'BBVI':
        # One row per latent variable, drawn from its approximate posterior q.
        return np.array([i.q.draw_variable_local(size=nsims) for i in self.latent_variables.z_list])
    elif self.latent_variables.estimation_method == "M-H":
        # Resample nsims columns (with replacement) from the stored MCMC chains.
        # NOTE: uses np.random, so repeated calls give different draws.
        chain = np.array([self.latent_variables.z_list[i].sample for i in range(len(self.latent_variables.z_list))])
        return chain[:,np.random.choice(chain.shape[1], nsims)]
    else:
        raise Exception("No latent variables estimated through Bayesian inference")
Draws latent variables from the model (for Bayesian inference) Parameters ---------- nsims : int How many draws to take Returns ---------- - np.ndarray of draws
def interrupt(self, interrupt):
    """Perform the shutdown of this server and save the exception.

    :param interrupt: the exception (or other value) that caused the
        shutdown; stored on ``self._interrupt`` once the server stopped.
    """
    # Set the flag to True first so code running during stop() observes a
    # truthy "interrupted" state, then replace it with the actual cause.
    self._interrupt = True
    self.stop()
    self._interrupt = interrupt
Perform the shutdown of this server and save the exception.
def get_query_results(self, job_id, offset=None, limit=None, page_token=None, timeout=0):
    """Fetch the results of the query job with the given job id.

    Direct mapping to the BigQuery API:
    https://cloud.google.com/bigquery/docs/reference/v2/jobs/getQueryResults

    Parameters
    ----------
    job_id : str
        The job id of the query to check
    offset : optional
        The index the result set should start at.
    limit : int, optional
        The maximum number of results to retrieve.
    page_token : optional
        Page token, returned by a previous call, to request the next page.
    timeout : float, optional
        Timeout in seconds

    Returns
    -------
    out
        The query reply
    """
    jobs = self.bigquery.jobs()
    request = jobs.getQueryResults(
        projectId=self.project_id,
        jobId=job_id,
        startIndex=offset,
        maxResults=limit,
        pageToken=page_token,
        timeoutMs=timeout * 1000)  # API expects milliseconds
    return request.execute(num_retries=self.num_retries)
Execute the query job indicated by the given job id. This is direct mapping to bigquery api https://cloud.google.com/bigquery/docs/reference/v2/jobs/getQueryResults Parameters ---------- job_id : str The job id of the query to check offset : optional The index the result set should start at. limit : int, optional The maximum number of results to retrieve. page_token : optional Page token, returned by previous call, to request the next page of results. timeout : float, optional Timeout in seconds Returns ------- out The query reply
def IPT_to_XYZ(cobj, *args, **kwargs):
    """
    Converts IPT to XYZ (2-degree observer, D65 illuminant).
    """
    ipt = numpy.array(cobj.get_value_tuple())
    # Undo the LMS' -> IPT linear transform.
    lms = numpy.dot(
        numpy.linalg.inv(IPTColor.conversion_matrices['lms_to_ipt']), ipt)
    # Undo the sign-preserving 0.43-power non-linearity.
    lms_prime = numpy.sign(lms) * numpy.abs(lms) ** (1 / 0.43)
    # Undo the XYZ -> LMS linear transform.
    xyz = numpy.dot(
        numpy.linalg.inv(IPTColor.conversion_matrices['xyz_to_lms']), lms_prime)
    return XYZColor(*xyz, observer='2', illuminant='d65')
Converts IPT to XYZ.
def merge_related_samples(file_name, out_prefix, no_status):
    """Merge related samples.

    :param file_name: the name of the input file.
    :param out_prefix: the prefix of the output files.
    :param no_status: is there a status column in the file?

    :type file_name: str
    :type out_prefix: str
    :type no_status: boolean

    In the input file, there is a pair of samples per line.  Related
    individuals are found by merging overlapping pairs into groups; one
    sample per group is kept (chosen at random), the rest are discarded.

    NOTE(review): this function uses Python 2 only constructs
    (``print >>``, ``xrange``, ``iterkeys``).
    """
    # What we need to save
    status = {}
    samples_sets = []
    open_function = open
    if file_name.endswith(".gz"):
        open_function = gzip.open
    with open_function(file_name, 'rb') as input_file:
        # Map column name -> column index from the header line.
        header_index = dict([
            (col_name, i) for i, col_name in
            enumerate(input_file.readline().rstrip("\r\n").split("\t"))
        ])
        for col_name in {"FID1", "IID1", "FID2", "IID2"}:
            if col_name not in header_index:
                msg = "{}: no column named {}".format(file_name, col_name)
                raise ProgramError(msg)
        if not no_status:
            if "status" not in header_index:
                msg = "{}: no column named status".format(file_name)
                raise ProgramError(msg)
        for line in input_file:
            row = line.rstrip("\r\n").split("\t")
            # Each sample is identified by its (FID, IID) pair.
            sample_1 = (row[header_index["FID1"]], row[header_index["IID1"]])
            sample_2 = (row[header_index["FID2"]], row[header_index["IID2"]])
            tmp_set = {sample_1, sample_2}
            match = False
            for i in xrange(len(samples_sets)):
                if len(tmp_set & samples_sets[i]) > 0:
                    # We have a match: merge into the existing group.
                    samples_sets[i] |= tmp_set
                    match = True
            if not match:
                # We did not find a match, so we add a new group.
                samples_sets.append(tmp_set)
            # Check for the status
            the_status = "None"
            if not no_status:
                the_status = row[header_index["status"]]
            status[(sample_1, sample_2)] = the_status
    # Doing a final check: groups created at different times may now
    # overlap, so merge them transitively.
    final_samples_set = []
    removed = set()
    for i in xrange(len(samples_sets)):
        if i in removed:
            # We removed this group
            continue
        group = samples_sets[i]
        j = i + 1
        while j < len(samples_sets):
            if j in removed:
                j += 1
                continue
            if len(group & samples_sets[j]) > 0:
                # We have a match, we start from the beginning
                group |= samples_sets[j]
                removed.add(j)
                j = i + 1
                continue
            j += 1
        final_samples_set.append(group)
    # Printing the output file
    output_file = None
    try:
        output_file = open(out_prefix + ".merged_related_individuals", 'w')
        to_print = ["index", "FID1", "IID1", "FID2", "IID2"]
        if not no_status:
            to_print.append("status")
        print >>output_file, "\t".join(to_print)
    except IOError:
        msg = "{}: can't write file".format(out_prefix + ".merged_related_individuals")
        raise ProgramError(msg)
    # Iterating on the groups
    chosen_samples = set()
    remaining_samples = set()
    for i, group in enumerate(final_samples_set):
        index = str(i+1)
        # Write every known pair fully contained in this group.
        for sample_1, sample_2 in status.iterkeys():
            if (sample_1 in group) and (sample_2 in group):
                to_print = [index, sample_1[0], sample_1[1], sample_2[0],
                            sample_2[1]]
                if not no_status:
                    to_print.append(status[(sample_1, sample_2)])
                print >>output_file, "\t".join(to_print)
        # Choose a random sample from the group; the rest are discarded.
        chosen = random.choice(list(group))
        chosen_samples.add(chosen)
        remaining_samples |= group - {chosen}
    # Printing the files
    try:
        filename = out_prefix + ".chosen_related_individuals"
        with open(filename, "w") as chosen_file:
            for sample_id in chosen_samples:
                print >>chosen_file, "\t".join(sample_id)
        filename = out_prefix + ".discarded_related_individuals"
        with open(filename, "w") as discarded_file:
            for sample_id in remaining_samples:
                print >>discarded_file, "\t".join(sample_id)
    except IOError:
        msg = "{}: can't write files".format(out_prefix + ".*")
        raise ProgramError(msg)
    # Closing the output file
    output_file.close()
Merge related samples. :param file_name: the name of the input file. :param out_prefix: the prefix of the output files. :param no_status: is there a status column in the file? :type file_name: str :type out_prefix: str :type no_status: boolean In the output file, there are a pair of samples per line. Hence, one can find related individuals by merging overlapping pairs.
def solarReturnJD(jd, lon, forward=True):
    """ Return the julian date before or after ``jd`` when the Sun is at
    ecliptic longitude ``lon``.  Searches forward by default.
    """
    sun = swe.sweObjectLon(const.SUN, jd)
    dist = angle.distance(sun, lon) if forward else -angle.distance(lon, sun)
    while abs(dist) > MAX_ERROR:
        # 0.9833 deg/day is the Sun's mean motion.
        jd += dist / 0.9833
        sun = swe.sweObjectLon(const.SUN, jd)
        dist = angle.closestdistance(sun, lon)
    return jd
Finds the julian date before or after 'jd' when the sun is at longitude 'lon'. It searches forward by default.
def beam_kev(self, get_error=False):
    """
    Get the beam energy in keV, based on typical biases:
        itw (or ite bias) - bias15 - platform bias

    if get_error: fetch the error in the value, rather than the value
    """
    epics = self.epics
    # Which statistic to pull from each epics channel.
    attr = 'std' if get_error else 'mean'
    # Initial beam energy in keV.
    beam = getattr(epics.target_bias, attr)/1000.
    # RB cell voltage.
    bias15 = getattr(epics.bias15, attr)/1000.
    # Platform bias depends on the experimental area.
    if self.area == 'BNMR':
        platform = getattr(epics.nmr_bias, attr)
    elif self.area == 'BNQR':
        platform = getattr(epics.nqr_bias, attr)/1000.
    else:
        raise RuntimeError('Area not recognized')
    if get_error:
        # Errors add in quadrature.
        return np.sqrt(np.sum(np.square((beam, bias15, platform))))  # keV
    return beam - bias15 - platform
Get the beam energy in kev, based on typical biases: itw (or ite bias) - bias15 - platform bias if get_error: fetch error in value, rather than value
def load_factory(name, directory, configuration=None):
    """
    Load a factory plugin by name and initialize it in ``directory``.

    :param name: the name of the plugin to load
    :param directory: the directory where the factory will reside
    :param configuration: optional configuration passed to the factory
    :return: the initialized factory instance
    :raises KeyError: if no entry point named ``name`` is registered
    """
    for entry_point in pkg_resources.iter_entry_points(ENTRY_POINT):
        if entry_point.name == name:
            factory_class = entry_point.load(require=False)
            return factory_class(directory, configuration)
    # BUG FIX: previously raised a bare ``KeyError`` with no message,
    # which made the failure impossible to diagnose.
    raise KeyError('no factory plugin named {!r}'.format(name))
Load a factory and have it initialize in a particular directory :param name: the name of the plugin to load :param directory: the directory where the factory will reside :return:
def lang_direction(request):
    """
    Sets the ``lang_direction`` context variable to "rtl" or "ltr"
    depending on whether the request's language is right-to-left.
    """
    # The RTL-language set is cached on the function object itself so the
    # settings lookup happens only once per process.
    # NOTE(review): assumes ``lang_direction.rtl_langs`` is initialised to
    # None elsewhere in this module — confirm.
    if lang_direction.rtl_langs is None:
        lang_direction.rtl_langs = getattr(settings, "RTL_LANGUAGES", set())
    return {"lang_direction": "rtl" if request.LANGUAGE_CODE in lang_direction.rtl_langs else "ltr"}
Sets lang_direction context variable to whether the language is RTL or LTR
def _set_status_self(self, key=JobDetails.topkey, status=JobStatus.unknown):
    """Set the status of this job, both in ``self.jobs`` and in the
    `JobArchive` if it is present.

    :param key: sub-job key; defaults to the top-level job key
    :param status: the new `JobStatus` value
    """
    fullkey = JobDetails.make_fullkey(self.full_linkname, key)
    if fullkey in self.jobs:
        self.jobs[fullkey].status = status
        if self._job_archive:
            # Keep the archive in sync with the in-memory status.
            self._job_archive.register_job(self.jobs[fullkey])
    else:
        # Job not yet tracked: register it with a placeholder logfile.
        self._register_self('dummy.log', key, status)
Set the status of this job, both in self.jobs and in the `JobArchive` if it is present.
def _elim_adj(adj, n): """eliminates a variable, acting on the adj matrix of G, returning set of edges that were added. Parameters ---------- adj: dict A dict of the form {v: neighbors, ...} where v are vertices in a graph and neighbors is a set. Returns ---------- new_edges: set of edges that were added by eliminating v. """ neighbors = adj[n] new_edges = set() for u, v in itertools.combinations(neighbors, 2): if v not in adj[u]: adj[u].add(v) adj[v].add(u) new_edges.add((u, v)) new_edges.add((v, u)) for v in neighbors: adj[v].discard(n) del adj[n] return new_edges
eliminates a variable, acting on the adj matrix of G, returning set of edges that were added. Parameters ---------- adj: dict A dict of the form {v: neighbors, ...} where v are vertices in a graph and neighbors is a set. Returns ---------- new_edges: set of edges that were added by eliminating v.
def monitor(self, name, cb, request=None, notify_disconnect=False):
    """Create a subscription.

    :param str name: PV name string
    :param callable cb: Processing callback
    :param request: A :py:class:`p4p.Value` or string to qualify this request,
        or None to use a default.
    :param bool notify_disconnect: In addition to Values, the callback may also
        be called with instances of Exception.
        Specifically: Disconnected , RemoteError, or Cancelled
    :returns: a :py:class:`Subscription` instance

    The callable will be invoked with one argument which is either.

    * A p4p.Value (Subject to :py:ref:`unwrap`)
    * A sub-class of Exception (Disconnected , RemoteError, or Cancelled)
    """
    R = Subscription(name, cb, notify_disconnect=notify_disconnect)
    # Re-dispatch each monitor event onto the cothread thread: the
    # low-level callback posts to R._event via cothread.Callback.
    cb = partial(cothread.Callback, R._event)
    R._S = super(Context, self).monitor(name, cb, request)
    return R
Create a subscription. :param str name: PV name string :param callable cb: Processing callback :param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default. :param bool notify_disconnect: In additional to Values, the callback may also be call with instances of Exception. Specifically: Disconnected , RemoteError, or Cancelled :returns: a :py:class:`Subscription` instance The callable will be invoked with one argument which is either. * A p4p.Value (Subject to :py:ref:`unwrap`) * A sub-class of Exception (Disconnected , RemoteError, or Cancelled)
def set_gcc():
    """Try to use GCC on OSX for OpenMP support."""
    # Only relevant on macOS (macports and homebrew).
    if 'darwin' not in platform.platform().lower():
        return
    gcc = extract_gcc_binaries()
    if gcc is None:
        # No usable GCC: disable OpenMP for this build.
        global use_openmp
        use_openmp = False
        logging.warning('No GCC available. Install gcc from Homebrew '
                        'using brew install gcc.')
    else:
        os.environ["CC"] = gcc
        os.environ["CXX"] = gcc
Try to use GCC on OSX for OpenMP support.
def __callServer(self, method="", params={}, data={}, callmethod='GET', content='application/json'):
    """
    A private method to make HTTP call to the DBS Server

    :param method: REST API to call, e.g. 'datasets, blocks, files, ...'.
    :type method: str
    :param params: Parameters to the API call, e.g. {'dataset':'/PrimaryDS/ProcessedDS/TIER'}.
    :type params: dict
    :param callmethod: The HTTP method used, by default it is HTTP-GET, possible values are GET,
        POST and PUT.
    :type callmethod: str
    :param content: The type of content the server is expected to return. DBS3 only supports
        application/json
    :type content: str

    NOTE(review): ``params`` and ``data`` use mutable default arguments;
    they appear never to be mutated here, but confirm before relying on it.
    """
    # Identify the calling user/host to the server.
    UserID = os.environ['USER']+'@'+socket.gethostname()
    try:
        UserAgent = "DBSClient/"+os.environ['DBS3_CLIENT_VERSION']+"/"+ self.userAgent
    except:
        # DBS3_CLIENT_VERSION not set (or userAgent missing): fall back.
        UserAgent = "DBSClient/Unknown"+"/"+ self.userAgent
    request_headers = {"Content-Type": content, "Accept": content, "UserID": UserID, "User-Agent":UserAgent }
    # Dispatch to the REST helper matching the HTTP verb (get/post/put).
    method_func = getattr(self.rest_api, callmethod.lower())
    data = cjson.encode(data)
    try:
        self.http_response = method_func(self.url, method, params, data, request_headers)
    except HTTPError as http_error:
        # Translate server-side HTTP errors into DBS client exceptions.
        self.__parseForException(http_error)
    if content != "application/json":
        return self.http_response.body
    try:
        json_ret=cjson.decode(self.http_response.body)
    except cjson.DecodeError:
        print("The server output is not a valid json, most probably you have a typo in the url.\n%s.\n" % self.url, file=sys.stderr)
        raise dbsClientException("Invalid url", "Possible urls are %s" %self.http_response.body)
    return json_ret
A private method to make HTTP call to the DBS Server :param method: REST API to call, e.g. 'datasets, blocks, files, ...'. :type method: str :param params: Parameters to the API call, e.g. {'dataset':'/PrimaryDS/ProcessedDS/TIER'}. :type params: dict :param callmethod: The HTTP method used, by default it is HTTP-GET, possible values are GET, POST and PUT. :type callmethod: str :param content: The type of content the server is expected to return. DBS3 only supports application/json :type content: str
def mnest_basename(self):
    """Full path to the MultiNest chain basename."""
    if not hasattr(self, '_mnest_basename'):
        # Friendly names for the common label strings.
        friendly = {'0_0': 'single',
                    '0_0-0_1': 'binary',
                    '0_0-0_1-0_2': 'triple'}
        s = self.labelstring
        s = friendly.get(s, s)
        s = '{}-{}'.format(self.ic.name, s)
        self._mnest_basename = os.path.join('chains', s+'-')
    if os.path.isabs(self._mnest_basename):
        return self._mnest_basename
    return os.path.join(self.directory, self._mnest_basename)
Full path to basename
def account_pin(self, id):
    """
    Pin / endorse a user.

    Returns a `relationship dict`_ containing the updated relationship to the user.
    """
    account_id = self.__unpack_id(id)
    endpoint = '/api/v1/accounts/{0}/pin'.format(str(account_id))
    return self.__api_request('POST', endpoint)
Pin / endorse a user. Returns a `relationship dict`_ containing the updated relationship to the user.
def build_git_url(self):
    """
    get build git url.

    :return: build git url or None if not found
    """
    # pylint: disable=len-as-condition
    if len(self.dutinformation) > 0:
        build = self.dutinformation.get(0).build
        if build is not None:
            return build.giturl
    return None
get build git url. :return: build git url or None if not found
def from_raster(cls, raster, properties, product='visual'):
    """Build a GeoFeature from a GeoRaster.

    Parameters
    ----------
    raster : GeoRaster
        the raster in the feature
    properties : dict
        Properties.
    product : str
        product associated to the raster
    """
    return cls(raster.footprint(),
               properties,
               raster.to_assets(product=product))
Initialize a GeoFeature object with a GeoRaster Parameters ---------- raster : GeoRaster the raster in the feature properties : dict Properties. product : str product associated to the raster
def set_data(self, data):
    """Set table data"""
    # A None payload leaves the current model untouched.
    if data is None:
        return
    self.model.set_data(data, self.dictfilter)
    self.sortByColumn(0, Qt.AscendingOrder)
Set table data
def import_parallel_gateway_to_graph(diagram_graph, process_id, process_attributes, element):
    """
    Adds to graph the new element that represents BPMN parallel gateway.

    Parallel gateway doesn't have additional attributes, so this simply
    delegates to the generic gateway importer; the separate method exists
    to improve code readability.

    :param diagram_graph: NetworkX graph representing a BPMN process diagram,
    :param process_id: string object, representing an ID of process element,
    :param process_attributes: dictionary that holds attribute values of 'process' element,
        which is parent of imported flow node,
    :param element: object representing a BPMN XML 'parallelGateway'.
    """
    BpmnDiagramGraphImport.import_gateway_to_graph(diagram_graph, process_id, process_attributes, element)
Adds to graph the new element that represents BPMN parallel gateway. Parallel gateway doesn't have additional attributes. Separate method is used to improve code readability. :param diagram_graph: NetworkX graph representing a BPMN process diagram, :param process_id: string object, representing an ID of process element, :param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of imported flow node, :param element: object representing a BPMN XML 'parallelGateway'.
def collapse_group_expr(groupx, cols, ret_row):
    "For each column equal to the group expression, replace that grouped list in ret_row by its first element. I'm sure this is buggy; look at a real DB's imp of this."
    for position, column in enumerate(cols.children):
        if column == groupx:
            ret_row[position] = ret_row[position][0]
    return ret_row
collapses columns matching the group expression. I'm sure this is buggy; look at a real DB's imp of this.
def send_message(self, chat_id, text, **options):
    """
    Send a text message to chat

    :param int chat_id: ID of the chat to send the message to
    :param str text: Text to send
    :param options: Additional sendMessage options
        (see https://core.telegram.org/bots/api#sendmessage)
    """
    required = dict(chat_id=chat_id, text=text)
    return self.api_call("sendMessage", **required, **options)
Send a text message to chat :param int chat_id: ID of the chat to send the message to :param str text: Text to send :param options: Additional sendMessage options (see https://core.telegram.org/bots/api#sendmessage)
def close_monomers(self, group, cutoff=4.0):
    """Returns Monomers from ``group`` lying within ``cutoff`` of this Monomer.

    Parameters
    ----------
    group: BaseAmpal or Subclass
        Group to be searched for Monomers that are close to this Monomer.
    cutoff: float
        Distance cut off.

    Returns
    -------
    nearby_residues: [Monomers]
        List of Monomers within cut off distance (first-seen order,
        no duplicates).
    """
    nearby_residues = []
    for own_atom in self.atoms.values():
        for other_atom in group.is_within(cutoff, own_atom):
            parent = other_atom.parent
            if parent not in nearby_residues:
                nearby_residues.append(parent)
    return nearby_residues
Returns a list of Monomers from within a cut off distance of the Monomer Parameters ---------- group: BaseAmpal or Subclass Group to be search for Monomers that are close to this Monomer. cutoff: float Distance cut off. Returns ------- nearby_residues: [Monomers] List of Monomers within cut off distance.
def filter(self, chamber, congress=CURRENT_CONGRESS, **kwargs):
    """
    Takes a chamber and Congress, OR state and district,
    returning a list of members
    """
    check_chamber(chamber)
    kwargs.update(chamber=chamber, congress=congress)
    # Pick the endpoint template based on how specific the query is.
    if 'state' in kwargs and 'district' in kwargs:
        template = "members/{chamber}/{state}/{district}/current.json"
    elif 'state' in kwargs:
        template = "members/{chamber}/{state}/current.json"
    else:
        template = "{congress}/{chamber}/members.json"
    return self.fetch(template.format(**kwargs), parse=lambda r: r['results'])
Takes a chamber and Congress, OR state and district, returning a list of members
def parse(cls, uri):
    """ Parse URI-string and return WURI object

    :param uri: string to parse
    :return: WURI
    """
    uri_components = urlsplit(uri)
    # Normalise absent/empty components to None: keep any non-None value
    # unless it is an empty string.  (CLARITY FIX: the previous lambda
    # relied on confusing operator precedence —
    # ``(isinstance(x, str) is False or len(x)) > 0`` — which happened to
    # be equivalent; this form states the intent explicitly.)
    adapter_fn = lambda x: x if x is not None and (not isinstance(x, str) or len(x) > 0) else None
    return cls(
        scheme=adapter_fn(uri_components.scheme),
        username=adapter_fn(uri_components.username),
        password=adapter_fn(uri_components.password),
        hostname=adapter_fn(uri_components.hostname),
        port=adapter_fn(uri_components.port),
        path=adapter_fn(uri_components.path),
        query=adapter_fn(uri_components.query),
        fragment=adapter_fn(uri_components.fragment),
    )
Parse URI-string and return WURI object :param uri: string to parse :return: WURI
def read_relative_file(filename):
    """
    Return the contents of the given file, whose path is taken relative
    to this module.
    """
    module_dir = dirname(abspath(__file__))
    full_path = join(module_dir, filename)
    with io.open(full_path, encoding='utf-8') as handle:
        return handle.read()
Return the contents of the given file. Its path is supposed relative to this module.
def compute_vest_stat(vest_dict, ref_aa, somatic_aa, codon_pos,
                      stat_func=np.mean, default_val=0.0):
    """Compute a missense VEST score statistic.

    Note: non-missense mutations are intentionally not filtered out and
    will take a default value of zero.

    Parameters
    ----------
    vest_dict : dict
        VEST scores across the gene of interest.
    ref_aa : list of str
        Reference amino acids.
    somatic_aa : list of str
        Somatic mutation amino acids.
    codon_pos : list of int
        Codon positions in the protein sequence.
    stat_func : function, default=np.mean
        Function that calculates a statistic.
    default_val : float
        Value returned when there are no mutations or no VEST scores.

    Returns
    -------
    float
        VEST score statistic for the provided mutation list.
    """
    if vest_dict is None:
        # No VEST scores available for this gene.
        return default_val
    scores = fetch_vest_scores(vest_dict, ref_aa, somatic_aa, codon_pos)
    return stat_func(scores) if scores else default_val
Compute missense VEST score statistic. Note: non-missense mutations are intentionally not filtered out and will take a default value of zero. Parameters ---------- vest_dict : dict dictionary containing vest scores across the gene of interest ref_aa: list of str list of reference amino acids somatic_aa: list of str somatic mutation aa codon_pos : list of int position of codon in protein sequence stat_func : function, default=np.mean function that calculates a statistic default_val : float default value to return if there are no mutations Returns ------- score_stat : float vest score statistic for provided mutation list
def do_max(environment, value, case_sensitive=False, attribute=None):
    """Return the largest item from the sequence.

    .. sourcecode:: jinja

        {{ [1, 2, 3]|max }}
            -> 3

    :param case_sensitive: Treat upper and lower case strings as distinct.
    :param attribute: Get the object with the max value of this attribute.
    """
    # Delegate to the shared helper with the builtin max() as the reducer;
    # _min_or_max handles case folding and attribute extraction.
    return _min_or_max(environment, value, max, case_sensitive, attribute)
Return the largest item from the sequence. .. sourcecode:: jinja {{ [1, 2, 3]|max }} -> 3 :param case_sensitive: Treat upper and lower case strings as distinct. :param attribute: Get the object with the max value of this attribute.
def pbs_for_set_with_merge(document_path, document_data, merge):
    """Make ``Write`` protobufs for ``set()`` methods with merge semantics.

    Args:
        document_path (str): A fully-qualified document path.
        document_data (dict): Property names and values to use for
            replacing a document.
        merge (Optional[bool] or Optional[List[apispec]]): If True, merge
            all fields; else, merge only the named fields.

    Returns:
        List[google.cloud.firestore_v1beta1.types.Write]: One or two
        ``Write`` protobuf instances for ``set()``.
    """
    extractor = DocumentExtractorForMerge(document_data)
    extractor.apply_merge(merge)

    # An empty document still needs an (empty-mask) update write.
    is_empty_merge = not document_data
    writes = []
    if extractor.has_updates or is_empty_merge:
        writes.append(
            extractor.get_update_pb(document_path, allow_empty_mask=is_empty_merge)
        )
    if extractor.transform_paths:
        writes.append(extractor.get_transform_pb(document_path))
    return writes
Make ``Write`` protobufs for ``set()`` methods. Args: document_path (str): A fully-qualified document path. document_data (dict): Property names and values to use for replacing a document. merge (Optional[bool] or Optional[List<apispec>]): If True, merge all fields; else, merge only the named fields. Returns: List[google.cloud.firestore_v1beta1.types.Write]: One or two ``Write`` protobuf instances for ``set()``.
def get_snapshot_command_history(self, name, limit=20, offset=0, view=None):
    """Retrieve a list of commands triggered by a snapshot policy.

    @param name: The name of the snapshot policy.
    @param limit: Maximum number of commands to retrieve.
    @param offset: Index of first command to retrieve.
    @param view: View to materialize. Valid values are 'full', 'summary',
        'export', 'export_redacted'.
    @return: List of commands triggered by a snapshot policy.
    @since: API v6
    """
    params = dict(limit=limit, offset=offset)
    if view:
        params['view'] = view
    path = "snapshots/policies/%s/history" % name
    return self._get(path, ApiSnapshotCommand, True, params=params,
                     api_version=6)
Retrieve a list of commands triggered by a snapshot policy. @param name: The name of the snapshot policy. @param limit: Maximum number of commands to retrieve. @param offset: Index of first command to retrieve. @param view: View to materialize. Valid values are 'full', 'summary', 'export', 'export_redacted'. @return: List of commands triggered by a snapshot policy. @since: API v6
def clear_coordinate_conditions(self):
    """Reset coordinate conditions to their default value (stub).

    Raises NoAccess when the zone-conditions metadata marks the field
    read-only or required.
    """
    if (self.get_zone_conditions_metadata().is_read_only()
            or self.get_zone_conditions_metadata().is_required()):
        raise NoAccess()
    default = self._coordinate_conditions_metadata['default_object_values'][0]
    self.my_osid_object_form._my_map['coordinateConditions'] = default
stub
def data(self, data):
    """Called for text between tags.

    Dispatches on the current parser state, stores the decoded value on
    the appropriate target object, then always resets the state to
    STATE_NONE.
    """
    if self.state == STATE_SOURCE_ID:
        self.context.audit_record.source_id = int(data)  # Audit ids can be 64 bits
    elif self.state == STATE_DATETIME:
        # Timestamp format is ISO-8601 without timezone; the resulting
        # datetime is naive (no tz info attached).
        dt = datetime.datetime.strptime(data, "%Y-%m-%dT%H:%M:%S")
        self.get_parent_element().datetimestamp = dt
    elif self.state == STATE_REASON_FOR_CHANGE:
        self.context.audit_record.reason_for_change = data.strip() or None  # Convert a result of '' to None.
    # Text seen in any other state is ignored; state is always reset.
    self.state = STATE_NONE
Called for text between tags
def singleChoiceParam(parameters, name, type_converter=str):
    """Return the chosen value of a single-choice parameter.

    Returns -1 if no value was chosen.

    :param parameters: the parameters tree.
    :param name: the name of the parameter.
    :param type_converter: callable applied to the chosen value to convert
        it to a different type (e.g. str, float, int); default is str.
    """
    node = parameters.find(".//SingleChoiceParam[@Name='{name}']".format(name=name))
    chosen_index = int(node.find('Value').text)
    if chosen_index < 0:
        # A negative index means nothing was selected.
        return chosen_index
    options = node.find('Values')
    return type_converter(options[chosen_index].text)
single choice parameter value. Returns -1 if no value was chosen. :param parameters: the parameters tree. :param name: the name of the parameter. :param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'
def _BuildAuthenticatorResponse(self, app_id, client_data, plugin_response): """Builds the response to return to the caller.""" encoded_client_data = self._Base64Encode(client_data) signature_data = str(plugin_response['signatureData']) key_handle = str(plugin_response['keyHandle']) response = { 'clientData': encoded_client_data, 'signatureData': signature_data, 'applicationId': app_id, 'keyHandle': key_handle, } return response
Builds the response to return to the caller.
def plot_PSD(self, xlim=None, units="kHz", show_fig=True, timeStart=None,
             timeEnd=None, *args, **kwargs):
    """Plot the power spectral density.

    Parameters
    ----------
    xlim : array_like, optional
        The x limits of the plotted PSD [LowerLimit, UpperLimit].
        Default value is [0, SampleFreq/2].
    units : string, optional
        Units of frequency to plot on the x axis - defaults to kHz.
    show_fig : bool, optional
        If True runs plt.show() before returning the figure; if False it
        just returns the figure object.
    timeStart, timeEnd : optional
        If given, the PSD is recomputed over this time window instead of
        using the cached self.freqs / self.PSD.

    Returns
    -------
    fig : matplotlib.figure.Figure object
        The figure object created.
    ax : matplotlib.axes.Axes object
        The subplot object created.
    """
    # Use identity checks (`is None`) instead of `== None`: equality can
    # misbehave for array-like arguments with a custom __eq__.
    if timeStart is None and timeEnd is None:
        freqs = self.freqs
        PSD = self.PSD
    else:
        freqs, PSD = self.get_PSD(timeStart=timeStart, timeEnd=timeEnd)
    unit_prefix = units[:-2]  # e.g. "kHz" -> "k"
    if xlim is None:
        xlim = [0, unit_conversion(self.SampleFreq / 2, unit_prefix)]
    fig = _plt.figure(figsize=properties['default_fig_size'])
    ax = fig.add_subplot(111)
    ax.semilogy(unit_conversion(freqs, unit_prefix), PSD, *args, **kwargs)
    ax.set_xlabel("Frequency ({})".format(units))
    ax.set_xlim(xlim)
    ax.grid(which="major")
    ax.set_ylabel("$S_{xx}$ ($V^2/Hz$)")
    if show_fig:
        _plt.show()
    return fig, ax
plot the pulse spectral density. Parameters ---------- xlim : array_like, optional The x limits of the plotted PSD [LowerLimit, UpperLimit] Default value is [0, SampleFreq/2] units : string, optional Units of frequency to plot on the x axis - defaults to kHz show_fig : bool, optional If True runs plt.show() before returning figure if False it just returns the figure object. (the default is True, it shows the figure) Returns ------- fig : matplotlib.figure.Figure object The figure object created ax : matplotlib.axes.Axes object The subplot object created
def reverse(viewname, subdomain=None, scheme=None, args=None, kwargs=None,
            current_app=None):
    """Reverse a URL from the given parameters, in a similar fashion to
    :meth:`django.core.urlresolvers.reverse`, with subdomain support.

    :param viewname: the name of URL
    :param subdomain: the subdomain to use for URL reversing
    :param scheme: the scheme to use when generating the full URL
    :param args: positional arguments used for URL reversing
    :param kwargs: named arguments used for URL reversing
    :param current_app: hint for the currently executing application
    """
    urlconf = settings.SUBDOMAIN_URLCONFS.get(subdomain, settings.ROOT_URLCONF)
    domain = get_domain()
    if subdomain is not None:
        # Prefix the configured domain with the requested subdomain.
        domain = '%s.%s' % (subdomain, domain)
    path = simple_reverse(viewname, urlconf=urlconf, args=args,
                          kwargs=kwargs, current_app=current_app)
    return urljoin(domain, path, scheme=scheme)
Reverses a URL from the given parameters, in a similar fashion to :meth:`django.core.urlresolvers.reverse`. :param viewname: the name of URL :param subdomain: the subdomain to use for URL reversing :param scheme: the scheme to use when generating the full URL :param args: positional arguments used for URL reversing :param kwargs: named arguments used for URL reversing :param current_app: hint for the currently executing application
def renavactive(request, pattern):
    """Template helper returning the "active" CSS class when `pattern`
    matches the request path, and the "not active" class otherwise.

    Usage: {% renavactive request "^/a_regex" %}
    """
    if re.search(pattern, request.path):
        setting_name, fallback = "NAVHELPER_ACTIVE_CLASS", "active"
    else:
        setting_name, fallback = "NAVHELPER_NOT_ACTIVE_CLASS", ""
    return getattr(settings, setting_name, fallback)
{% renavactive request "^/a_regex" %}
def DbGetServerInfo(self, argin):
    """Get info about host, mode and level for specified server.

    :param argin: server name
    :type: tango.DevString
    :return: server info
    :rtype: tango.DevVarStringArray
    """
    self._log.debug("In DbGetServerInfo()")
    # Thin wrapper: the lookup itself is delegated to the database layer.
    return self.db.get_server_info(argin)
Get info about host, mode and level for specified server :param argin: server name :type: tango.DevString :return: server info :rtype: tango.DevVarStringArray
def _run_qc_tools(bam_file, data):
    """Run a set of third party quality control tools, returning QC directory and metrics.

    :param bam_file: alignments in bam format
    :param data: dict with all configuration information

    :returns: dict with output of different tools
    """
    # Bug fix: `peddy` was referenced below (tools["peddy"]) but missing
    # from this import list, causing a NameError when peddy QC was enabled.
    from bcbio.qc import (atropos, contamination, coverage, damage, fastqc,
                          kraken, peddy, qsignature, qualimap, samtools,
                          picard, srna, umi, variant, viral, preseq, chipseq)
    tools = {"fastqc": fastqc.run,
             "atropos": atropos.run,
             "small-rna": srna.run,
             "samtools": samtools.run,
             "qualimap": qualimap.run,
             "qualimap_rnaseq": qualimap.run_rnaseq,
             "qsignature": qsignature.run,
             "contamination": contamination.run,
             "coverage": coverage.run,
             "damage": damage.run,
             "variants": variant.run,
             "peddy": peddy.run_qc,
             "kraken": kraken.run,
             "picard": picard.run,
             "umi": umi.run,
             "viral": viral.run,
             "preseq": preseq.run,
             "chipqc": chipseq.run
             }
    qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc",
                                             data["description"]))
    metrics = {}
    qc_out = utils.deepish_copy(dd.get_summary_qc(data))
    for program_name in dd.get_algorithm_qc(data):
        if not bam_file and program_name != "kraken":  # kraken doesn't need bam
            continue
        if dd.get_phenotype(data) == "germline" and program_name != "variants":
            continue
        qc_fn = tools[program_name]
        cur_qc_dir = os.path.join(qc_dir, program_name)
        out = qc_fn(bam_file, data, cur_qc_dir)
        qc_files = None
        if out and isinstance(out, dict):
            # Check for metrics output, two cases:
            # 1. output with {"metrics"} and files ("base")
            if "metrics" in out:
                metrics.update(out.pop("metrics"))
            # 2. a plain dictionary of metrics
            elif "base" not in out:
                metrics.update(out)
            # Check for files-only output
            if "base" in out:
                qc_files = out
        elif out and isinstance(out, six.string_types) and os.path.exists(out):
            qc_files = {"base": out, "secondary": []}
        if not qc_files:
            qc_files = _organize_qc_files(program_name, cur_qc_dir)
        if qc_files:
            qc_out[program_name] = qc_files
    metrics["Name"] = dd.get_sample_name(data)
    metrics["Quality format"] = dd.get_quality_format(data).lower()
    return {"qc": qc_out, "metrics": metrics}
Run a set of third party quality control tools, returning QC directory and metrics. :param bam_file: alignments in bam format :param data: dict with all configuration information :returns: dict with output of different tools
def is_available(self):
    """Return True if *any* view handler in the class is currently
    available via its `is_available` method.
    """
    if self.is_always_available:
        return True
    return any(getattr(self, viewname).is_available()
               for viewname in self.__views__)
Returns `True` if *any* view handler in the class is currently available via its `is_available` method.
def local_error(self, originalValue, calculatedValue):
    """Calculate the absolute error between the first entries of the two
    given value lists.

    :param list originalValue: values of the original data.
    :param list calculatedValue: values of the calculated TimeSeries
        corresponding to originalValue.
    :return: the error measure of the two given values.
    :rtype: numeric
    """
    return abs(originalValue[0] - calculatedValue[0])
Calculates the error between the two given values. :param list originalValue: List containing the values of the original data. :param list calculatedValue: List containing the values of the calculated TimeSeries that corresponds to originalValue. :return: Returns the error measure of the two given values. :rtype: numeric
def application(handler, adapter_cls=WerkzeugAdapter):
    """Convert an anillo function-based handler into a WSGI-compliant
    application function.

    :param adapter_cls: the wsgi adapter implementation (default: werkzeug)
    :returns: wsgi function
    :rtype: callable
    """
    adapter = adapter_cls()

    def wsgi_app(environ, start_response):
        anillo_request = adapter.to_request(environ)
        anillo_response = handler(anillo_request)
        respond = adapter.from_response(anillo_response)
        return respond(environ, start_response)

    return wsgi_app
Converts an anillo function based handler in a wsgi compiliant application function. :param adapter_cls: the wsgi adapter implementation (default: wekrzeug) :returns: wsgi function :rtype: callable
def show_link(self):
    '''show link information'''
    # Print one status line per attached MAVLink master connection.
    for master in self.mpstate.mav_master:
        # Delay between the newest timestamp seen overall and this
        # link's newest timestamp, converted from milliseconds to seconds.
        linkdelay = (self.status.highest_msec - master.highest_msec)*1.0e-3
        if master.linkerror:
            print("link %u down" % (master.linknum+1))
        else:
            print("link %u OK (%u packets, %.2fs delay, %u lost, %.1f%% loss)" % (master.linknum+1,
                                                                                  self.status.counters['MasterIn'][master.linknum],
                                                                                  linkdelay,
                                                                                  master.mav_loss,
                                                                                  master.packet_loss()))
show link information
def validate_df(df, dm, con=None):
    """Run all validations of data model `dm` against DataFrame `df`.

    Returns the original DataFrame with new columns that contain the
    validation output.  Validation columns start with:

    presence_pass_  (checking that req'd columns are present)
    type_pass_      (checking that the data is of the correct type)
    value_pass_     (checking that the value is within the appropriate range)
    group_pass_     (making sure that group validations pass)
    """
    required_one = {}  # keep track of "required one in group" validations here
    # go through and run all validations for the data type
    for validation_name, validation in dm.iterrows():
        value_type = validation['type']
        if validation_name in df.columns:
            output = df[validation_name].apply(test_type, args=(value_type,))
            df["type_pass" + "_" + validation_name + "_" + value_type] = output
        val_list = validation['validations']
        # a float here is NaN, i.e. no validations declared for this column
        if not val_list or isinstance(val_list, float):
            continue
        for val in val_list:
            func_name, arg = split_func(val)
            if arg == "magic_table_column":
                continue
            # first validate for presence
            if func_name in presence_operations:
                func = presence_operations[func_name]
                grade = func(validation_name, arg, dm, df, con)
                pass_col_name = ("presence_pass_" + validation_name +
                                 "_" + func.__name__)
                df[pass_col_name] = grade
            # then validate for correct values
            elif func_name in value_operations:
                func = value_operations[func_name]
                if validation_name in df.columns:
                    grade = df.apply(func, args=(validation_name, arg, dm, df, con),
                                     axis=1)
                    col_name = ("value_pass_" + validation_name +
                                "_" + func.__name__)
                    if col_name in df.columns:
                        # de-duplicate by appending the first free suffix 1-9
                        for suffix in range(1, 10):
                            if (col_name + str(suffix)) in df.columns:
                                continue
                            col_name = col_name + str(suffix)
                            break
                    df[col_name] = grade.astype(object)
            # last, validate at the column group level
            elif func_name in group_operations:
                func = group_operations[func_name]
                missing = func(validation_name, arg, dm, df)
                required_one.setdefault(arg, []).append(missing)
    # format the group validation columns
    for key, value in list(required_one.items()):
        if None in value:
            # at least one value from the required group is present,
            # so the validation passes
            continue
        # otherwise, all of the values from the required group are missing,
        # so the validation fails
        df["group_pass_{}".format(key)] = (
            "you must have one column from group {}: {}".format(
                key, ", ".join(value)))
    return df
Take in a DataFrame and corresponding data model. Run all validations for that DataFrame. Output is the original DataFrame with some new columns that contain the validation output. Validation columns start with: presence_pass_ (checking that req'd columns are present) type_pass_ (checking that the data is of the correct type) value_pass_ (checking that the value is within the appropriate range) group_pass_ (making sure that group validations pass)
def from_sequence(cls, sequence, phos_3_prime=False):
    """Create a DNA duplex from a nucleotide sequence.

    Parameters
    ----------
    sequence : str
        Nucleotide sequence.
    phos_3_prime : bool, optional
        If False the 5' and the 3' phosphor will be omitted.
    """
    primary_strand = NucleicAcidStrand(sequence, phos_3_prime=phos_3_prime)
    return cls(primary_strand)
Creates a DNA duplex from a nucleotide sequence. Parameters ---------- sequence: str Nucleotide sequence. phos_3_prime: bool, optional If false the 5' and the 3' phosphor will be omitted.
def hdel(self, *args):
    """Delete many InstanceHashField fields with a single redis call.

    Pass the hash field names to delete as arguments; returns the
    number of fields really deleted.
    """
    if args and not any(name in self._instancehash_fields for name in args):
        raise ValueError("Only InstanceHashField can be used here.")
    # Drop indexes for indexable fields before deleting them.
    for name in args:
        field = self.get_field(name)
        if field.indexable:
            field.deindex()
    return self._call_command('hdel', *args)
This command on the model allow deleting many instancehash fields with only one redis call. You must pass hash names to retrieve as arguments
def json2pattern(s):
    """Convert a JSON / mongo-shell query string to a normalized pattern.

    Accepts mongo shell notation with unquoted key names.  Returns a
    canonical JSON string with values collapsed to 1, or None when the
    input cannot be parsed.
    """
    # make valid JSON by wrapping field names in quotes
    s, _ = re.subn(r'([{,])\s*([^,{\s\'"]+)\s*:', ' \\1 "\\2" : ', s)
    # handle shell values that are not valid JSON
    s = shell2json(s)
    # convert to 1 where possible, to get rid of things like new Date(...)
    # (the unused match-count from the original code has been dropped)
    s, _ = re.subn(r'([:,\[])\s*([^{}\[\]"]+?)\s*([,}\]])', '\\1 1 \\3', s)
    # now convert to dictionary, then back to canonically ordered JSON
    try:
        doc = json.loads(s, object_hook=_decode_pattern_dict)
        return json.dumps(doc, sort_keys=True, separators=(', ', ': '))
    except ValueError:
        return None
Convert JSON format to a query pattern. Includes even mongo shell notation without quoted key names.
def log_request(self, code="-", size="-"):
    """Log a handled request to the service logger at DEBUG level.

    `size` is accepted but unused here -- presumably kept for signature
    compatibility with BaseHTTPRequestHandler.log_request; confirm with
    callers.
    """
    self._service.log(logging.DEBUG, '"%s" %s', self.requestline, code)
Logs a request to the server
def write_fc3_to_hdf5(fc3, filename='fc3.hdf5', p2s_map=None,
                      compression=None):
    """Write third-order force constants in hdf5 format.

    Parameters
    ----------
    fc3 : ndarray
        Force constants,
        shape=(n_satom, n_satom, n_satom, 3, 3, 3) or
        (n_patom, n_satom, n_satom, 3, 3, 3), dtype=double.
    filename : str
        Filename to be used.
    p2s_map : ndarray, optional
        Primitive atom indices in supercell index system,
        shape=(n_patom,), dtype=intc.
    compression : str or int, optional
        h5py's lossless compression filters (e.g., "gzip", "lzf").
        See the detail at docstring of h5py.Group.create_dataset.
        Default is None.
    """
    with h5py.File(filename, 'w') as handle:
        handle.create_dataset('fc3', data=fc3, compression=compression)
        if p2s_map is not None:
            handle.create_dataset('p2s_map', data=p2s_map)
Write third-order force constants in hdf5 format. Parameters ---------- force_constants : ndarray Force constants shape=(n_satom, n_satom, n_satom, 3, 3, 3) or (n_patom, n_satom, n_satom,3,3,3), dtype=double filename : str Filename to be used. p2s_map : ndarray, optional Primitive atom indices in supercell index system shape=(n_patom,), dtype=intc compression : str or int, optional h5py's lossless compression filters (e.g., "gzip", "lzf"). See the detail at docstring of h5py.Group.create_dataset. Default is None.
def index_run(record_path, keep_json, check_duplicate):
    """Convert raw JSON records into sqlite3 DB.

    Normally RASH launches a daemon that takes care of indexing.
    See ``rash daemon --help``.
    """
    # Imported lazily to keep CLI startup cheap.
    from .config import ConfigStore
    from .indexer import Indexer
    config_store = ConfigStore()
    Indexer(config_store, check_duplicate, keep_json, record_path).index_all()
Convert raw JSON records into sqlite3 DB. Normally RASH launches a daemon that takes care of indexing. See ``rash daemon --help``.
def create_string_array(self, key, value):
    """Create method of CRUD operation for string array data.

    Args:
        key (string): The variable to write to the DB.
        value (any): The data to write to the DB.

    Returns:
        (string): Result of DB write, or None when key or value is None.
    """
    if key is None or value is None:
        self.tcex.log.warning(u'The key or value field was None.')
        return None
    if isinstance(value, list):
        payload = json.dumps(value)
    else:
        # used to save raw value with embedded variables
        payload = value
    return self.db.create(key.strip(), payload)
Create method of CRUD operation for string array data. Args: key (string): The variable to write to the DB. value (any): The data to write to the DB. Returns: (string): Result of DB write.
def value(self):
    """Convenience method to get one value (or list of values) for the
    query's selected fields.

    Raises ValueError when no select fields were set; returns None when
    the query matched nothing.
    """
    names = self.fields_select.names()
    if not names:
        raise ValueError("no select fields were set, so cannot return value")
    row = self._query('get_one')
    if not row:
        return None
    selected = [row.get(name, None) for name in names]
    # A single selected field is unwrapped to a scalar.
    return selected[0] if len(names) == 1 else selected
convenience method to just get one value or tuple of values for the query
def _get_coord_cell_node_coord(self, coord, coords=None, nans=None,
                               var=None):
    """
    Get the boundaries of an unstructured coordinate.

    Parameters
    ----------
    coord: xr.Variable
        The coordinate whose bounds should be returned
    %(CFDecoder.get_cell_node_coord.parameters.no_var|axis)s

    Returns
    -------
    %(CFDecoder.get_cell_node_coord.returns)s
    """
    # The CF 'bounds' attribute names a sibling coordinate in the dataset.
    bounds = coord.attrs.get('bounds')
    if bounds is not None:
        bounds = self.ds.coords.get(bounds)
    if bounds is not None:
        if coords is not None:
            # Restrict the bounds to the requested coordinate selection,
            # using only the keys that actually appear in bounds.dims.
            bounds = bounds.sel(**{
                key: coords[key]
                for key in set(coords).intersection(bounds.dims)})
        if nans is not None and var is None:
            raise ValueError("Need the variable to deal with NaN!")
        elif nans is None:
            pass
        elif nans == 'skip':
            # Keep only cells whose variable value is finite.
            bounds = bounds[~np.isnan(var.values)]
        elif nans == 'only':
            # Keep only cells whose variable value is NaN.
            bounds = bounds[np.isnan(var.values)]
        else:
            raise ValueError(
                "`nans` must be either None, 'skip', or 'only'! "
                "Not {0}!".format(str(nans)))
    # May be None when the coordinate declares no (resolvable) bounds.
    return bounds
Get the boundaries of an unstructed coordinate Parameters ---------- coord: xr.Variable The coordinate whose bounds should be returned %(CFDecoder.get_cell_node_coord.parameters.no_var|axis)s Returns ------- %(CFDecoder.get_cell_node_coord.returns)s
def sanity_check(vcs):
    """Do sanity check before making changes.

    Check that we are not on a tag and/or do not have local changes;
    otherwise ask for confirmation and exit the process when declined.
    """
    if vcs.is_clean_checkout():
        return
    question = ("This is NOT a clean checkout. You are on a tag or you have "
                "local changes.\n"
                "Are you sure you want to continue?")
    if not ask(question, default=False):
        sys.exit(1)
Do sanity check before making changes Check that we are not on a tag and/or do not have local changes. Returns True when all is fine.
def make_objs(names, out_dir=''):
    """Make object file names for cl.exe and link.exe.

    When `out_dir` is given, the object files are placed inside it.
    """
    obj_names = [replace_ext(name, '.obj') for name in names]
    if not out_dir:
        return obj_names
    return [os.path.join(out_dir, obj) for obj in obj_names]
Make object file names for cl.exe and link.exe.
def findViewsContainingPoint(self, point, _filter=None):
    """Find the list of Views that contain the point.

    :param point: an (x, y) tuple.
    :param _filter: optional predicate applied to each candidate View;
        defaults to accepting every View.

    Note: the original signature used Python 2 tuple-parameter unpacking
    (``def f(self, (x, y), ...)``), which is a SyntaxError in Python 3;
    callers still pass a single (x, y) tuple.
    """
    x, y = point
    if not _filter:
        _filter = lambda v: True
    return [v for v in self.views
            if v.containsPoint((x, y)) and _filter(v)]
Finds the list of Views that contain the point (x, y).
def getAPOBECFrequencies(dotAlignment, orig, new, pattern):
    """Count orig->new mutations by their trinucleotide context.

    @param dotAlignment: result from calling basePlotter
    @param orig: A C{str}, naming the original base
    @param new: A C{str}, what orig was mutated to
    @param pattern: A C{str}, which pattern we're looking for (must be one
        of 'cPattern', 'tPattern')
    @return: dict mapping each trinucleotide motif to its mutation count
    """
    cPattern = ['ACA', 'ACC', 'ACG', 'ACT', 'CCA', 'CCC', 'CCG', 'CCT',
                'GCA', 'GCC', 'GCG', 'GCT', 'TCA', 'TCC', 'TCG', 'TCT']
    tPattern = ['ATA', 'ATC', 'ATG', 'ATT', 'CTA', 'CTC', 'CTG', 'CTT',
                'GTA', 'GTC', 'GTG', 'GTT', 'TTA', 'TTC', 'TTG', 'TTT']
    # choose the right pattern
    if pattern == 'cPattern':
        patterns = cPattern
        middleBase = 'C'
    else:
        patterns = tPattern
        middleBase = 'T'
    # pre-populate the freqs dict; only these motifs are ever counted
    freqs = defaultdict(int)
    for motifKey in patterns:
        freqs[motifKey] = 0
    # the subject sequence is the 4th field of the first alignment row;
    # all remaining rows are queries
    subject = dotAlignment[0].split('\t')[3]
    for item in dotAlignment[1:]:
        query = item.split('\t')[1]
        for index, qBase in enumerate(query):
            sBase = subject[index]
            if qBase == new and sBase == orig:
                # Bug fixes vs. the original:
                # * subject[index - 1] at index 0 silently wrapped to the
                #   LAST base, counting a bogus motif; use a 'start'
                #   sentinel instead (mirrors the 'end' sentinel).
                # * when subject[index + 1] raised IndexError, minusSb was
                #   left stale from a previous iteration (or unbound).
                plusSb = subject[index + 1] if index + 1 < len(subject) else 'end'
                minusSb = subject[index - 1] if index > 0 else 'start'
                motif = '%s%s%s' % (minusSb, middleBase, plusSb)
                if motif in freqs:
                    freqs[motif] += 1
    return freqs
Gets mutation frequencies if they are in a certain pattern. @param dotAlignment: result from calling basePlotter @param orig: A C{str}, naming the original base @param new: A C{str}, what orig was mutated to @param pattern: A C{str}m which pattern we're looking for (must be one of 'cPattern', 'tPattern')