code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def hitail(E: np.ndarray, diffnumflux: np.ndarray, isimE0: np.ndarray, E0: np.ndarray, Bhf: np.ndarray, bh: float, verbose: int = 0): Bh = np.empty_like(E0) for iE0 in np.arange(E0.size): Bh[iE0] = Bhf[iE0]*diffnumflux[isimE0[iE0], iE0] het = Bh*(E[:, None] / E0)**-bh het[E[:, None] <...
strickland 1993 said 0.2, but 0.145 gives better match to peak flux at 2500 = E0
def update_vrf_table_links(self, vrf_table, new_imp_rts, removed_imp_rts): assert vrf_table if new_imp_rts: self._link_vrf_table(vrf_table, new_imp_rts) if removed_imp_rts: self._remove_links_to_vrf_table_for_rts(vrf_table, ...
Update mapping from RT to VRF table.
def dtdEntity(self, name):
    """Look up *name* in the DTD entity hash table.

    Raises treeError when the underlying libxml2 lookup fails; otherwise
    the raw pointer is wrapped in an xmlEntity instance.
    """
    entity_ptr = libxml2mod.xmlGetDtdEntity(self._o, name)
    if entity_ptr is None:
        raise treeError('xmlGetDtdEntity() failed')
    return xmlEntity(_obj=entity_ptr)
Do an entity lookup in the DTD entity hash table and
def get_average_voltage(self, min_voltage=None, max_voltage=None): pairs_in_range = self._select_in_voltage_range(min_voltage, max_voltage) if len(pairs_in_range) == 0: return 0 total_cap_in_range = sum([p.mAh for p in pairs_in_r...
Average voltage for a path between a min and max voltage. Args: min_voltage (float): The minimum allowable voltage for a given step. max_voltage (float): The maximum allowable voltage for a given step. Returns: Aver...
def mediated_transfer_async( self, token_network_identifier: TokenNetworkID, amount: PaymentAmount, target: TargetAddress, identifier: PaymentID, fee: FeeAmount = MEDIATION_FEE, secret: Secret = None, secret_hash: SecretHash...
Transfer `amount` between this node and `target`. This method will start an asynchronous transfer, the transfer might fail or succeed depending on a couple of factors: - Existence of a path that can be used, through the usage of direct or intermediary channels. - ...
def write_events(self, outname):
    """Write the found events to a sngl inspiral table.

    Only HDF output is supported; any other output name raises ValueError.
    """
    self.make_output_dir(outname)
    # NOTE(review): substring test, so anything containing '.hdf'
    # (e.g. '.hdf5') matches -- presumably intentional; confirm with callers.
    if '.hdf' not in outname:
        raise ValueError('Cannot write to this format')
    self.write_to_hdf(outname)
Write the found events to a sngl inspiral table
def declare_string(self, value): byte_s = BytesIO(str(value).encode(ENCODING)) data_file = self.research_object.add_data_file(byte_s, content_type=TEXT_PLAIN) checksum = posixpath.basename(data_file) data_id = "data:%s" % posixpath.split(data_file)[1] entity = self.document.entit...
Save as string in UTF-8.
def match_path(rule, path): split_rule = split_by_slash(rule) split_path = split_by_slash(path) url_vars = {} if len(split_rule) != len(split_path): return False, {} for r, p in zip(split_rule, split_path): if r.startswith('{') and r.endswith('}'): url_vars[r[1:-1]] = p ...
Match path. >>> match_path('/foo', '/foo') (True, {}) >>> match_path('/foo', '/bar') (False, {}) >>> match_path('/users/{user_id}', '/users/1') (True, {'user_id': '1'}) >>> match_path('/users/{user_id}', '/users/not-integer') (True, {'user_id': 'not-integer'})
def decode_nibbles(value): nibbles_with_flag = bytes_to_nibbles(value) flag = nibbles_with_flag[0] needs_terminator = flag in {HP_FLAG_2, HP_FLAG_2 + 1} is_odd_length = flag in {HP_FLAG_0 + 1, HP_FLAG_2 + 1} if is_odd_length: raw_nibbles = nibbles_with_flag[1:] else: raw_nibbles ...
The inverse of the Hex Prefix function
def set_edist_powerlaw(self, emin_mev, emax_mev, delta, ne_cc): if not (emin_mev >= 0): raise ValueError('must have emin_mev >= 0; got %r' % (emin_mev,)) if not (emax_mev >= emin_mev): raise ValueError('must have emax_mev >= emin_mev; got %r, %r' % (emax_mev, emin_mev)) i...
Set the energy distribution function to a power law. **Call signature** *emin_mev* The minimum energy of the distribution, in MeV *emax_mev* The maximum energy of the distribution, in MeV *delta* The power-law index of the distribution *ne_cc* ...
def get_ts_stats_significance(self, x, ts, stat_ts_func, null_ts_func,
                              B=1000, permute_fast=False, label_ts=''):
    """Return the statistics, p-values and actual number of bootstrap samples.

    Thin wrapper around the module-level ts_stats_significance; ``x`` and
    ``label_ts`` are accepted for interface compatibility but unused here.
    """
    result = ts_stats_significance(
        ts, stat_ts_func, null_ts_func, B=B, permute_fast=permute_fast)
    stats_ts, pvals, nums = result
    return stats_ts, pvals, nums
Returns the statistics, pvalues and the actual number of bootstrap samples.
def _wrapper(func, *vect_args, **vect_kwargs):
    """Return the vectorized wrapper around *func*.

    Callable objects that lack a ``__name__`` (e.g. instances implementing
    ``__call__``) are given a synthetic one so functools.wraps can copy it.
    """
    if not hasattr(func, '__name__'):
        func.__name__ = '{}.__call__'.format(func.__class__.__name__)
    vectorized = _NumpyVectorizeWrapper(func, *vect_args, **vect_kwargs)
    return wraps(func)(vectorized)
Return the vectorized wrapper function.
def _format_title_string(self, title_string): if "StreamTitle='" in title_string: tmp = title_string[title_string.find("StreamTitle='"):].replace("StreamTitle='", self.icy_title_prefix) ret_string = tmp[:tmp.find("';")] else: ret_string = title_string if '"art...
format mplayer's title
def register(listener): if not isinstance(listener, _EventListener): raise TypeError("Listeners for %s must be either a " "CommandListener, ServerHeartbeatListener, " "ServerListener, or TopologyListener." % (listener,)) if isinstance(listener, CommandList...
Register a global event listener. :Parameters: - `listener`: A subclass of :class:`CommandListener`, :class:`ServerHeartbeatListener`, :class:`ServerListener`, or :class:`TopologyListener`.
def _batch_entry(self):
    """Entry point for the batcher thread.

    Runs batch entries in a loop until an exception escapes; the handler
    stores exc_info for the parent and signals the process at self.pid
    with SIGUSR1.
    """
    try:
        while True:
            self._batch_entry_run()
    except:  # noqa: E722 -- NOTE(review): bare except looks intentional (captures everything, incl. SystemExit); confirm
        self.exc_info = sys.exc_info()
        # presumably self.pid is the main/parent process id -- TODO confirm
        os.kill(self.pid, signal.SIGUSR1)
Entry point for the batcher thread.
def _untag_sentence(tagged_sentence):
    """Strip Medscan tags and annotations, returning the plain sentence.

    Parameters
    ----------
    tagged_sentence : str
        The tagged sentence.

    Returns
    -------
    str
        Sentence with tags and annotations stripped out.
    """
    # TAG_PATT keeps its second capture group (the covered text);
    # JUNK_PATT matches leftover annotation junk removed outright.
    without_tags = TAG_PATT.sub('\\2', tagged_sentence)
    without_junk = JUNK_PATT.sub('', without_tags)
    return without_junk.strip()
Removes all tags in the sentence, returning the original sentence without Medscan annotations. Parameters ---------- tagged_sentence : str The tagged sentence Returns ------- untagged_sentence : str Sentence with tags and annotations stripped out
def srfrec(body, longitude, latitude):
    """Convert planetocentric lat/lon of a surface point on a body to
    rectangular coordinates.

    Wraps CSPICE srfrec_c:
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/srfrec_c.html

    :param body: NAIF integer code of an extended body.
    :param longitude: Longitude of the point, radians.
    :param latitude: Latitude of the point, radians.
    :return: Rectangular coordinates of the surface point.
    """
    out_vec = stypes.emptyDoubleVector(3)
    libspice.srfrec_c(ctypes.c_int(body),
                      ctypes.c_double(longitude),
                      ctypes.c_double(latitude),
                      out_vec)
    return stypes.cVectorToPython(out_vec)
Convert planetocentric latitude and longitude of a surface point on a specified body to rectangular coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/srfrec_c.html :param body: NAIF integer code of an extended body. :type body: int :param longitude: Longitude of point in radians...
def create_box(self, orientation=Gtk.Orientation.HORIZONTAL, spacing=0):
    """Create a non-homogeneous Gtk.Box.

    Based on *orientation* the box is either HORIZONTAL or VERTICAL.
    """
    box = Gtk.Box(orientation=orientation, spacing=spacing)
    box.set_homogeneous(False)
    return box
Function creates box. Based on orientation it can be either HORIZONTAL or VERTICAL
def GetCoinAssets(self):
    """Get the asset ids of all coins present in the wallet.

    Returns:
        list: UInt256 asset ids (order unspecified -- built via a set).
    """
    return list({coin.Output.AssetId for coin in self.GetCoins()})
Get asset ids of all coins present in the wallet. Returns: list: of UInt256 asset id's.
def find_project_by_short_name(short_name, pbclient, all=None): try: response = pbclient.find_project(short_name=short_name, all=all) check_api_error(response) if (len(response) == 0): msg = '%s not found! You can use the all=1 argument to \ search in all the s...
Return project by short_name.
def import_dashboards(path, recursive): p = Path(path) files = [] if p.is_file(): files.append(p) elif p.exists() and not recursive: files.extend(p.glob('*.json')) elif p.exists() and recursive: files.extend(p.rglob('*.json')) for f in files: logging.info('Importi...
Import dashboards from JSON
def commit_or_abort(self, ctx, timeout=None, metadata=None, credentials=None):
    """Run the commit-or-abort operation through the underlying stub.

    All keyword arguments are forwarded unchanged.
    """
    stub_call = self.stub.CommitOrAbort
    return stub_call(ctx, timeout=timeout, metadata=metadata,
                     credentials=credentials)
Runs commit or abort operation.
def hook_scope(self, name=""): assert not self.revision self.cursor.execute( 'insert into hooks (hook, date) values (?, ?)', (name or sys.argv[0], datetime.datetime.utcnow().isoformat())) self.revision = self.cursor.lastrowid try: yield se...
Scope all future interactions to the current hook execution revision.
async def _wait(self):
    """Wait on the other editatoms that are constructing nodes my new
    nodes refer to.

    Buids with no registered entry in allbldgbuids are skipped rather
    than awaited.
    """
    for buid in self.otherbldgbuids:
        nodeevnt = self.allbldgbuids.get(buid)
        if nodeevnt is None:
            continue
        # nodeevnt is indexable; element 1 presumably holds an asyncio.Event -- confirm
        await nodeevnt[1].wait()
Wait on the other editatoms who are constructing nodes my new nodes refer to
def get_service_packages(self): api = self._get_api(billing.DefaultApi) package_response = api.get_service_packages() packages = [] for state in PACKAGE_STATES: items = getattr(package_response, state) or [] for item in ensure_listable(items): para...
Get all service packages
def database_current_migration(self):
    """Return the migration version currently recorded in the database.

    None when the migration table does not exist or no migration data
    was loaded.
    """
    table_present = self.migration_table.exists(self.session.bind)
    if table_present and self.migration_data is not None:
        return self.migration_data.version
    return None
Return the current migration in the database.
def _gcd(a, b): while b: a, b = b, (a % b) return a
Calculate the Greatest Common Divisor of a and b. Unless b==0, the result will have the same sign as b (so that when b is divided by it, the result comes out positive).
def _cast_repr(self, caster, *args, **kwargs):
    """Cast this constant with *caster*, forwarding args and kwargs.

    When no representation has been registered yet, a default one is
    produced lazily by hash_and_truncate (per the surrounding docs: the
    name hashed with sha512, first 8 bytes of the digest) and cached.
    """
    # Name-mangled attribute: resolves against the defining class, so the
    # cache is private to it.
    if self.__repr_content is None:
        self.__repr_content = hash_and_truncate(self)
        # Only the default-representation path may populate the cache here.
        assert self.__uses_default_repr
    return caster(self.__repr_content, *args, **kwargs)
Will cast this constant with the provided caster, passing args and kwargs. If there is no registered representation, will hash the name using sha512 and use the first 8 bytes of the digest.
def convertLatLngToPixelXY(self, lat, lng, level): mapSize = self.getMapDimensionsByZoomLevel(level) lat = self.clipValue(lat, self.min_lat, self.max_lat) lng = self.clipValue(lng, self.min_lng, self.max_lng) x = (lng + 180) / 360 sinlat = math.sin(lat * math.pi / 180) y ...
returns the x and y values of the pixel corresponding to a latitude and longitude.
def indent(text, amount, ch=' '):
    """Indent every line of *text* by *amount* copies of *ch*.

    Line endings are preserved (splitlines(True) keeps them attached).
    """
    prefix = ch * amount
    indented = [prefix + line for line in text.splitlines(True)]
    return ''.join(indented)
Indents a string by the given amount of characters.
def _get_svc_path(name='*', status=None): if not SERVICE_DIR: raise CommandExecutionError('Could not find service directory.') ena = set() for el in glob.glob(os.path.join(SERVICE_DIR, name)): if _is_svc(el): ena.add(os.readlink(el)) log.trace('found enabled service p...
Return a list of paths to services with ``name`` that have the specified ``status`` name a glob for service name. default is '*' status None : all services (no filter, default choice) 'DISABLED' : available service(s) that is not enabled 'ENABLED' : enabled service (whet...
def inner(self, x1, x2): if x1 not in self: raise LinearSpaceTypeError('`x1` {!r} is not an element of ' '{!r}'.format(x1, self)) if x2 not in self: raise LinearSpaceTypeError('`x2` {!r} is not an element of ' ...
Return the inner product of ``x1`` and ``x2``. Parameters ---------- x1, x2 : `LinearSpaceElement` Elements whose inner product to compute. Returns ------- inner : `LinearSpace.field` element Inner product of ``x1`` and ``x2``.
def delete_from_all_link_group(self, group):
    """Delete this device from an All-Link Group by sending the standard
    delete-from-group command with *group* as cmd2."""
    message = StandardSend(self._address,
                           COMMAND_DELETE_FROM_ALL_LINK_GROUP_0X02_NONE,
                           cmd2=group)
    self._send_msg(message)
Delete a device from an All-Link Group.
def _join_domain(domain, username=None, password=None, account_ou=None, account_exists=False): NETSETUP_JOIN_DOMAIN = 0x1 NETSETUP_ACCOUNT_CREATE = 0x2 NETSETUP_DOMAIN_JOIN_IF_JOINED = 0x20 NETSETUP_JOIN_WITH_NEW_NAME = 0x400 join_o...
Helper function to join the domain. Args: domain (str): The domain to which the computer should be joined, e.g. ``example.com`` username (str): Username of an account which is authorized to join computers to the specified domain. Need to be either fully qualified ...
def close(self):
    """Close the tough cursor; safe to call more than once.

    Errors raised by the underlying cursor's close() are deliberately
    swallowed (best effort -- the cursor may already be dead).
    """
    if self._closed:
        return
    try:
        self._cursor.close()
    except Exception:
        pass
    self._closed = True
Close the tough cursor. It will not complain if you close it more than once.
def reload_module(self, module_name): module = self.loaded_modules.get(module_name) if module: module.stop(reloading=True) else: _log.info("Reload loading new module module '%s'", module_name) success = self.load_module(module_name) ...
Reloads the specified module without changing its ordering. 1. Calls stop(reloading=True) on the module 2. Reloads the Module object into .loaded_modules 3. Calls start(reloading=True) on the new object If called with a module name that is not currently loaded, it will load it....
def seek_to_end(self, *partitions): if not all([isinstance(p, TopicPartition) for p in partitions]): raise TypeError('partitions must be TopicPartition namedtuples') if not partitions: partitions = self._subscription.assigned_partitions() assert partitions, 'No partit...
Seek to the most recent available offset for partitions. Arguments: *partitions: Optionally provide specific TopicPartitions, otherwise default to all assigned partitions. Raises: AssertionError: If any partition is not currently assigned, or if ...
def get_gene_count_tab(infile, bc_getter=None): gene = None counts = collections.Counter() for line in infile: values = line.strip().split("\t") assert len(values) == 2, "line: %s does not contain 2 columns" % line read_id, assigned_gene = values if ass...
Yields the counts per umi for each gene bc_getter: method to get umi (plus optionally, cell barcode) from read, e.g get_umi_read_id or get_umi_tag TODO: ADD FOLLOWING OPTION skip_regex: skip genes matching this regex. Useful to ignore unassigned reads (as per get_bundles class above)
def sprinkler_reaches_cell(x, y, sx, sy, r):
    """Return whether a cell is within the radius of the sprinkler.

    Args:
        x: column index of the cell.
        y: row index of the cell.
        sx: column index of the sprinkler.
        sy: row index of the sprinkler.
        r: sprinkler radius.

    Returns:
        bool: True when the Euclidean distance to the sprinkler is <= r.
    """
    # math.hypot is the idiomatic Euclidean distance and avoids
    # overflow/underflow of the intermediate squared terms.
    return math.hypot(sx - x, sy - y) <= r
Return whether a cell is within the radius of the sprinkler. x: column index of cell y: row index of cell sx: column index of sprinkler sy: row index of sprinkler r: sprinkler radius
def selected_purpose(self):
    """Obtain the layer purpose selected by the user.

    :returns: Metadata of the selected layer purpose, or None when
        nothing valid is selected.
    :rtype: dict, None
    """
    current = self.lstCategories.currentItem()
    try:
        return definition(current.data(QtCore.Qt.UserRole))
    except (AttributeError, NameError):
        # No selection (current is None) or no matching definition.
        return None
Obtain the layer purpose selected by user. :returns: Metadata of the selected layer purpose. :rtype: dict, None
def get_current_user(with_domain=True): try: user_name = win32api.GetUserNameEx(win32api.NameSamCompatible) if user_name[-1] == '$': test_user = win32api.GetUserName() if test_user == 'SYSTEM': user_name = 'SYSTEM' elif get_sid_from_name(test_user)...
Gets the user executing the process Args: with_domain (bool): ``True`` will prepend the user name with the machine name or domain separated by a backslash Returns: str: The user name
def _checkAndConvertIndex(self, index): if index < 0: index = len(self) + index if index < 0 or index >= self._doc.blockCount(): raise IndexError('Invalid block index', index) return index
Check integer index, convert from less than zero notation
def audio_open(path, backends=None):
    """Open an audio file with the first backend that can decode it.

    *backends* may be a list of backend classes to try in order; when
    None, all available backends are attempted. Raises NoBackendError
    when every backend fails with DecodeError.
    """
    candidates = available_backends() if backends is None else backends
    for backend_cls in candidates:
        try:
            return backend_cls(path)
        except DecodeError:
            continue
    raise NoBackendError()
Open an audio file using a library that is available on this system. The optional `backends` parameter can be a list of audio file classes to try opening the file with. If it is not provided, `audio_open` tries all available backends. If you call this function many times, you can avoid the cost of ...
def kind(self):
    """The kind of this execution context (PERIODIC, EVENT_DRIVEN or OTHER)."""
    with self._mutex:
        raw_kind = self._obj.get_kind()
        if raw_kind == RTC.PERIODIC:
            return self.PERIODIC
        if raw_kind == RTC.EVENT_DRIVEN:
            return self.EVENT_DRIVEN
        return self.OTHER
The kind of this execution context.
def get_m2m_widget(cls, field):
    """Prepare a ManyToManyWidget partial bound to the field's related model."""
    related_model = get_related_model(field)
    return functools.partial(widgets.ManyToManyWidget, model=related_model)
Prepare widget for m2m field
def _product_filter(products) -> str:
    """Calculate the product filter bitstring.

    Each product name maps to a bit value via PRODUCTS; duplicates are
    collapsed through the set before summing. The binary representation
    is reversed so the least significant bit comes first.
    """
    total = sum({PRODUCTS[p] for p in products})
    return format(total, "b")[::-1]
Calculate the product filter.
def collect_results(self) -> Optional[Tuple[int, Dict[str, float]]]: self.wait_to_finish() if self.decoder_metric_queue.empty(): if self._results_pending: self._any_process_died = True self._results_pending = False return None decoded_checkpoin...
Returns the decoded checkpoint and the decoder metrics or None if the queue is empty.
def generate_gap_bed(fname, outname):
    """Generate a BED file with gap (run-of-N) locations.

    Parameters
    ----------
    fname : str
        Filename of input FASTA file.
    outname : str
        Filename of output BED file.
    """
    fasta = Fasta(fname)
    with open(outname, "w") as bed:
        for chrom in fasta.keys():
            sequence = fasta[chrom][:].seq
            for match in re.finditer(r'N+', sequence):
                bed.write("{}\t{}\t{}\n".format(
                    chrom, match.start(0), match.end(0)))
Generate a BED file with gap locations. Parameters ---------- fname : str Filename of input FASTA file. outname : str Filename of output BED file.
def zoom_fit(self): zoom = self.grid.grid_renderer.zoom grid_width, grid_height = self.grid.GetSize() rows_height = self._get_rows_height() + \ (float(self.grid.GetColLabelSize()) / zoom) cols_width = self._get_cols_width() + \ (float(self.grid.GetRowLabelSize()) ...
Zooms the grid to fit the window. Only has an effect if the resulting zoom level is between minimum and maximum zoom level.
def total_power(self):
    """Total power used: average current times voltage, rounded to
    self.sr digits."""
    return round(self.average_current * self.voltage, self.sr)
Total power used.
def get_group_id(name, vpc_id=None, vpc_name=None, region=None, key=None, keyid=None, profile=None): conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if name.startswith('sg-'): log.debug('group %s is a group id. get_group_id not called.', name) return name ...
Get a Group ID given a Group Name or Group Name and VPC ID CLI example:: salt myminion boto_secgroup.get_group_id mysecgroup
def _rewrite_f(self, q): if isinstance(q, models.F): q.name = rewrite_lookup_key(self.model, q.name) return q if isinstance(q, Node): q.children = list(map(self._rewrite_f, q.children)) if hasattr(q, 'lhs'): q.lhs = self._rewrite_f(q.lhs) i...
Rewrite field names inside F call.
def squared_error(eval_data, predictions, scores='ignored', learner='ignored'):
    """Return the squared error of each prediction in *predictions* with
    respect to the correct output of the matching instance in *eval_data*.

    ``scores`` and ``learner`` are accepted for interface compatibility
    and ignored.
    """
    errors = []
    for instance, prediction in zip(eval_data, predictions):
        residual = np.array(prediction) - np.array(instance.output)
        errors.append(np.sum(residual ** 2))
    return errors
Return the squared error of each prediction in `predictions` with respect to the correct output in `eval_data`. >>> data = [Instance('input', (0., 0., 1.)), ... Instance('input', (0., 1., 1.)), ... Instance('input', (1., 0., 0.))] >>> squared_error(data, [(0., 1., 1.), (0., 1., 1.),...
def update(self, report: str = None) -> bool: if report is not None: self.raw = report else: raw = self.service.fetch(self.station) if raw == self.raw: return False self.raw = raw self.data, self.units = metar.parse(self.station, se...
Updates raw, data, and translations by fetching and parsing the METAR report Returns True if a new report is available, else False
def btc_tx_script_to_asm( script_hex ): if len(script_hex) == 0: return "" try: script_array = btc_script_deserialize(script_hex) except: log.error("Failed to convert '%s' to assembler" % script_hex) raise script_tokens = [] for token in script_array: if token...
Decode a script into assembler
def not_empty(value, allow_empty = False, **kwargs):
    """Validate that ``value`` is not empty (falsy).

    :param value: The value to validate.
    :param allow_empty: If ``True``, returns ``None`` when ``value`` is
        empty instead of raising.
    :returns: ``value`` unchanged, or ``None`` for an allowed-empty value.
    :raises errors.EmptyValueError: when ``value`` is empty and empties
        are not allowed.
    """
    if value:
        return value
    if allow_empty:
        return None
    raise errors.EmptyValueError('value was empty')
Validate that ``value`` is not empty. :param value: The value to validate. :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is empty. If ``False``, raises a :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is empty. Defaults t...
def _get_pgtiou(pgt): pgtIou = None retries_left = 5 if not settings.CAS_PGT_FETCH_WAIT: retries_left = 1 while not pgtIou and retries_left: try: return PgtIOU.objects.get(tgt=pgt) except PgtIOU.DoesNotExist: if settings.CAS_PGT_FETCH_WAIT: ...
Returns a PgtIOU object given a pgt. The PgtIOU (tgt) is set by the CAS server in a different request that has completed before this call, however, it may not be found in the database by this calling thread, hence the attempt to get the ticket is retried for up to 5 seconds. This should be handled some...
def FindProxies(): sc = objc.SystemConfiguration() settings = sc.dll.SCDynamicStoreCopyProxies(None) if not settings: return [] try: cf_http_enabled = sc.CFDictRetrieve(settings, "kSCPropNetProxiesHTTPEnable") if cf_http_enabled and bool(sc.CFNumToInt32(cf_http_enabled)): cfproxy = sc.CFDictRe...
This reads the OSX system configuration and gets the proxies.
def passphrase_file(passphrase=None): cmd = [] pass_file = None if not passphrase and 'CRYPTORITO_PASSPHRASE_FILE' in os.environ: pass_file = os.environ['CRYPTORITO_PASSPHRASE_FILE'] if not os.path.isfile(pass_file): raise CryptoritoError('CRYPTORITO_PASSPHRASE_FILE is invalid') ...
Read passphrase from a file. This should only ever be used by our built in integration tests. At this time, during normal operation, only pinentry is supported for entry of passwords.
def associate_psds_to_single_ifo_segments(opt, fd_segments, gwstrain, flen, delta_f, flow, ifo, dyn_range_factor=1., precision=None): single_det_opt = copy_opts_for_single_ifo(opt, ifo) associate_psds_to_segments(single_det_opt,...
Associate PSDs to segments for a single ifo when using the multi-detector CLI
def make_gating_node(workflow, datafind_files, outdir=None, tags=None): cp = workflow.cp if tags is None: tags = [] condition_strain_class = select_generic_executable(workflow, "condition_strain") condition_strain_nodes = [] condition_st...
Generate jobs for autogating the data for PyGRB runs. Parameters ---------- workflow: pycbc.workflow.core.Workflow An instanced class that manages the constructed workflow. datafind_files : pycbc.workflow.core.FileList A FileList containing the frame files to be gated. outdir : stri...
def _load_version(cls, state, version):
    """Load a previously saved NearestNeighborClassifier model.

    Parameters
    ----------
    state : dict
        Serialized model state; the 'knn_model' entry is rebuilt into a
        NearestNeighborsModel and removed from the dict.
    version : int
        Version number maintained by the class writer; must match the
        current model version.
    """
    assert(version == cls._PYTHON_NN_CLASSIFIER_MODEL_VERSION)
    knn_model = _tc.nearest_neighbors.NearestNeighborsModel(state['knn_model'])
    del state['knn_model']
    # SECURITY NOTE(review): eval() on serialized data executes arbitrary
    # code when the saved model is untrusted -- consider a type-name
    # lookup table instead.
    state['_target_type'] = eval(state['_target_type'])
    return cls(knn_model, state)
A function to load a previously saved NearestNeighborClassifier model. Parameters ---------- unpickler : GLUnpickler A GLUnpickler file handler. version : int Version number maintained by the class writer.
def ModuleLogger(globs): if not globs.has_key('_debug'): raise RuntimeError("define _debug before creating a module logger") logger_name = globs['__name__'] logger = logging.getLogger(logger_name) logger.globs = globs if '.' not in logger_name: hdlr = logging.StreamHandler() ...
Create a module level logger. To debug a module, create a _debug variable in the module, then use the ModuleLogger function to create a "module level" logger. When a handler is added to this logger or a child of this logger, the _debug variable will be incremented. All of the calls within functio...
def _merge_inplace(self, other): if other is None: yield else: priority_vars = OrderedDict( kv for kv in self.variables.items() if kv[0] not in self.dims) variables = merge_coords_for_inplace_math( [self.variables, other.variables], pri...
For use with in-place binary arithmetic.
def validate(opts): if hasattr(opts, 'extensions'): return _validate(opts.extensions) elif isinstance(opts, list): return _validate(opts) else: raise ValueError("Value passed into extension validation must either " "be a list of strings or a namespace with an...
Client-facing validate method. Checks to see if the passed in opts argument is either a list or a namespace containing the attribute 'extensions' and runs validations on it accordingly. If opts is neither of those things, this will raise a ValueError :param opts: either a list of strings or a namespace...
def __populate_sections(self): if not self._ptr: raise BfdException("BFD not initialized") for section in _bfd.get_sections_list(self._ptr): try: bfd_section = BfdSection(self._ptr, section) self._sections[bfd_section.name] = bfd_section ...
Get a list of the section present in the bfd to populate our internal list.
def as_euler_angles(q): alpha_beta_gamma = np.empty(q.shape + (3,), dtype=np.float) n = np.norm(q) q = as_float_array(q) alpha_beta_gamma[..., 0] = np.arctan2(q[..., 3], q[..., 0]) + np.arctan2(-q[..., 1], q[..., 2]) alpha_beta_gamma[..., 1] = 2*np.arccos(np.sqrt((q[..., 0]**2 + q[..., 3]**2)/n)) ...
Open Pandora's Box If somebody is trying to make you use Euler angles, tell them no, and walk away, and go and tell your mum. You don't want to use Euler angles. They are awful. Stay away. It's one thing to convert from Euler angles to quaternions; at least you're moving in the right direction....
def chunks(event_list, chunk_size):
    """Yield successive chunk_size-sized slices of *event_list*.

    The final chunk may be shorter when the length is not an exact
    multiple of chunk_size.
    """
    total = len(event_list)
    for start in range(0, total, chunk_size):
        end = start + chunk_size
        yield event_list[start:end]
Yield successive n-sized chunks from the event list.
def move_notes(self, noteids, folderid): if self.standard_grant_type is not "authorization_code": raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.") response = self._req('/notes/move', post_data={ 'not...
Move notes to a folder :param noteids: The noteids to move :param folderid: The folderid to move notes to
def make_python_name(s, default=None, number_prefix='N',encoding="utf-8"):
    """Return a unicode string that can be used as a legal Python identifier.

    :Arguments:
        *s*
            source string; falls back to *default* when ``''`` or ``None``
        *number_prefix*
            string prepended when the name starts with a digit
        *encoding*
            encoding used to decode the resulting byte string

    NOTE: relies on the Python 2 ``unicode`` builtin.
    """
    if s in ('', None):
        s = default
    s = str(s)
    # Raw-string patterns: avoids invalid/deprecated '\'-escape warnings.
    s = re.sub(r"[^a-zA-Z0-9_]", "_", s)
    # Identifiers must not start with a digit.
    if re.match(r'\d', s) is not None:
        s = number_prefix + s
    return unicode(s, encoding)
Returns a unicode string that can be used as a legal python identifier. :Arguments: *s* string *default* use *default* if *s* is ``None`` *number_prefix* string to prepend if *s* starts with a number
def is_datetime_arraylike(arr):
    """Check whether an array-like is a datetime array-like or DatetimeIndex.

    Parameters
    ----------
    arr : array-like
        The array-like to check.

    Returns
    -------
    boolean
        Whether or not the array-like is a datetime array-like or
        DatetimeIndex.
    """
    if isinstance(arr, ABCDatetimeIndex):
        return True
    if isinstance(arr, (np.ndarray, ABCSeries)):
        # Only object-dtype arrays need the (expensive) dtype inference.
        return (is_object_dtype(arr.dtype)
                and lib.infer_dtype(arr, skipna=False) == 'datetime')
    return getattr(arr, 'inferred_type', None) == 'datetime'
Check whether an array-like is a datetime array-like or DatetimeIndex. Parameters ---------- arr : array-like The array-like to check. Returns ------- boolean Whether or not the array-like is a datetime array-like or DatetimeIndex. Examples -------- >>> is_...
def describe_connection(self):
    """Return a string describing the device and its connection state."""
    # `is None` rather than `== None`: identity is the correct test and is
    # not fooled by device objects overriding __eq__ (PEP 8 recommendation).
    if self.device is None:
        return "%s [disconnected]" % (self.name)
    return "%s connected to %s %s version: %s [serial: %s]" % (
        self.name, self.vendor_name, self.product_name,
        self.version_number, self.serial_number)
Return string representation of the device, including the connection state
def run(items): items = [utils.to_single_data(x) for x in items] work_dir = _sv_workdir(items[0]) input_backs = list(set(filter(lambda x: x is not None, [dd.get_background_cnv_reference(d, "seq2c") for d in items]))) coverage_file = _combine_coverages(items, work_dir, i...
Normalization and log2 ratio calculation plus CNV calling for full cohort. - Combine coverage of each region for each sample - Prepare read counts for each sample - Normalize coverages in cohort by gene and sample, and calculate log2 ratios - Call amplifications and deletions
def computeISI(spikeTrains): zeroCount = 0 isi = [] cells = 0 for i in range(np.shape(spikeTrains)[0]): if cells > 0 and cells % 250 == 0: print str(cells) + " cells processed" for j in range(np.shape(spikeTrains)[1]): if spikeTrains[i][j] == 0: zeroCount += 1 elif zeroCount > ...
Estimates the inter-spike interval from a spike train matrix. @param spikeTrains (array) matrix of spike trains @return isi (array) matrix with the inter-spike interval obtained from the spike train. Each entry in this matrix represents the number of time-steps in-between 2 spikes as the algo...
def color(colors, export_type, output_file=None): all_colors = flatten_colors(colors) template_name = get_export_type(export_type) template_file = os.path.join(MODULE_DIR, "templates", template_name) output_file = output_file or os.path.join(CACHE_DIR, template_name) if os.path.isfile(template_file)...
Export a single template file.
def export(self, remote_function):
    """Export a remote function, deferring when the worker has no mode yet.

    Args:
        remote_function: the RemoteFunction object.
    """
    if self._worker.mode is None:
        # Worker not connected yet; queue for export later.
        self._functions_to_export.append(remote_function)
        return
    if self._worker.mode != ray.worker.SCRIPT_MODE:
        # Only the driver (script mode) exports functions.
        return
    self._do_export(remote_function)
Export a remote function. Args: remote_function: the RemoteFunction object.
def serialize(self, value): if isinstance(value, float) and self.as_type is six.text_type: value = u'{0:.{1}f}'.format(value, self.float_places) value = self.as_type(value) elif self.as_type is six.text_type: if isinstance(value, bool): value = six.tex...
Convert the external Python value to a type that is suitable for storing in a Mutagen file object.
def listen_to_node(self, id_):
    """Attach the callback on the job pubsub channel if the job exists.

    Returns id_ on success, or None when id_ is unknown to redis.
    """
    if r_client.get(id_) is None:
        return None
    # NOTE(review): assumes _pubsub_key is a pure key-builder -- confirm.
    key = _pubsub_key(id_)
    self.toredis.subscribe(key, callback=self.callback)
    self._listening_to[key] = id_
    return id_
Attach a callback on the job pubsub if it exists
def _validate(self): probably_good_to_go = True sheet = self.table identity = self.db_sheet_cols.id id_col = sheet.loc[:, identity] if any(id_col.duplicated()): warnings.warn( "your database is corrupt: duplicates" " encountered in the ...
Checks that the db-file is ok Returns: True if OK, False if not.
def chunkWidgets(self, group): ui_groups = [] subgroup = [] for index, item in enumerate(group['items']): if getin(item, ['options', 'full_width'], False): ui_groups.append(subgroup) ui_groups.append([item]) subgroup = [] ...
chunk the widgets up into groups based on their sizing hints
def get_buckets(self, bucket_type=None, timeout=None): bucket_type = self._get_bucket_type(bucket_type) url = self.bucket_list_path(bucket_type=bucket_type, timeout=timeout) status, headers, body = self._request('GET', url) if status == 200: ...
Fetch a list of all buckets
def display(port=None, height=None):
    """Display a TensorBoard instance already running on this machine.

    Args:
        port: port the TensorBoard server listens on, as an `int`, or
            None to pick the most recently launched instance.
        height: height of the frame the UI is rendered into.
    """
    _display(port=port, height=height, print_message=True,
             display_handle=None)
Display a TensorBoard instance already running on this machine. Args: port: The port on which the TensorBoard server is listening, as an `int`, or `None` to automatically select the most recently launched TensorBoard. height: The height of the frame into which to render the TensorBoard UI, ...
def get_interpolated_value(self, energy):
    """Return the interpolated density at *energy* for each spin channel.

    Args:
        energy: Energy to return the density for.

    Returns:
        dict mapping each spin key in self.densities to its linearly
        interpolated density value.
    """
    # Dict comprehension over .items() instead of looping over .keys()
    # and indexing back into the dict.
    return {
        spin: get_linear_interpolated_value(self.energies, dens, energy)
        for spin, dens in self.densities.items()
    }
Returns interpolated density for a particular energy. Args: energy: Energy to return the density for.
def generate_wavelengths(minwave=500, maxwave=26000, num=10000, delta=None, log=True, wave_unit=u.AA): wave_unit = units.validate_unit(wave_unit) if delta is not None: num = None waveset_str = 'Min: {0}, Max: {1}, Num: {2}, Delta: {3}, Log: {4}'.format( minwave, maxw...
Generate wavelength array to be used for spectrum sampling. .. math:: minwave \\le \\lambda < maxwave Parameters ---------- minwave, maxwave : float Lower and upper limits of the wavelengths. These must be values in linear space regardless of ``log``. num : int Th...
def statcast(start_dt=None, end_dt=None, team=None, verbose=True): start_dt, end_dt = sanitize_input(start_dt, end_dt) small_query_threshold = 5 if start_dt and end_dt: date_format = "%Y-%m-%d" d1 = datetime.datetime.strptime(start_dt, date_format) d2 = datetime.datetime.strptime(end...
Pulls statcast play-level data from Baseball Savant for a given date range. INPUTS: start_dt: YYYY-MM-DD : the first date for which you want statcast data end_dt: YYYY-MM-DD : the last date for which you want statcast data team: optional (defaults to None) : city abbreviation of the team you want data ...
def zdiffstore(self, dest, keys, withscores=False):
    """Store the difference of the sorted sets specified by ``keys``
    into a new sorted set in ``dest``."""
    script_keys = (dest,) + tuple(keys)
    score_flag = 'withscores' if withscores else ''
    return self.execute_script('zdiffstore', script_keys, score_flag,
                               withscores=withscores)
Compute the difference of multiple sorted. The difference of sets specified by ``keys`` into a new sorted set in ``dest``.
def get_data_len(self):
    """Compute the length of the data field of this HTTP/2 frame.

    The on-wire size of the padlen field itself and the actual padding
    bytes are subtracted from the total captured length (self.s_len,
    set during pre-dissection).

    @return int: length of the data part; never negative.
    """
    padding_len = self.getfieldval('padlen')
    fld, fval = self.getfield_and_val('padlen')
    # i2len yields the encoded size of the padlen field.
    padding_len_len = fld.i2len(self, fval)
    ret = self.s_len - padding_len_len - padding_len
    assert(ret >= 0)
    return ret
get_data_len computes the length of the data field To do this computation, the length of the padlen field and the actual padding is subtracted to the string that was provided to the pre_dissect # noqa: E501 fun of the pkt parameter @return int; length of the data part of the HTTP/2 fra...
def RawBytesToScriptHash(raw):
    """Hash the provided raw bytes using the ripemd160 algorithm
    (via Hash160) and wrap the digest in a UInt160.

    Args:
        raw (bytes): hex-encoded byte array.

    Returns:
        UInt160
    """
    unhexed = binascii.unhexlify(raw)
    digest_hex = bytes(Crypto.Hash160(unhexed), encoding='utf-8')
    return UInt160(data=binascii.unhexlify(digest_hex))
Get a hash of the provided raw bytes using the ripemd160 algorithm. Args: raw (bytes): byte array of raw bytes. e.g. b'\xAA\xBB\xCC' Returns: UInt160:
def dumplist(args): from .query import Database db = Database() r = db.objects( protocol=args.protocol, purposes=args.purpose, model_ids=(args.client,), groups=args.group, classes=args.sclass ) output = sys.stdout if args.selftest: from bob.db.utils import null output =...
Dumps lists of files based on your criteria
def flg(self, name, help, abbrev=None):
    """Describe a boolean flag (``store_true`` option).

    The long option name is derived from ``name`` with underscores
    turned into dashes; the short option defaults to a dash plus the
    first character of ``name`` when ``abbrev`` is not given.
    """
    short_opt = abbrev if abbrev else '-' + name[0]
    long_opt = '--' + name.replace('_', '-')
    self._add(name, short_opt, long_opt, action='store_true', help=help)
Describe a flag
def GetValue( self, Channel, Parameter): try: if Parameter == PCAN_API_VERSION or Parameter == PCAN_HARDWARE_NAME or Parameter == PCAN_CHANNEL_VERSION or Parameter == PCAN_LOG_LOCATION or Parameter == PCAN_TRACE_LOCATION or Parameter == PCAN_BITRATE_INFO_FD or Parameter == PC...
Retrieves a PCAN Channel value Remarks: Parameters can be present or not according with the kind of Hardware (PCAN Channel) being used. If a parameter is not available, a PCAN_ERROR_ILLPARAMTYPE error will be returned. The return value of this method is a 2-touple, wher...
def run_missing_simulations(self, param_list, runs=None):
    """Run only the simulations not yet present in the database.

    ``param_list`` may be either a list of parameter combinations or a
    dict mapping each parameter to its possible values, in which case
    the full cartesian product of combinations is generated first.
    ``runs`` is the desired number of replications per combination.
    """
    if isinstance(param_list, dict):
        # Expand the dict into the explicit list of combinations.
        param_list = list_param_combinations(param_list)
    missing = self.get_missing_simulations(param_list, runs)
    self.run_simulations(missing)
Run the simulations from the parameter list that are not yet available in the database. This function also makes sure that we have at least ``runs`` replications for each parameter combination. Additionally, param_list can either be a list containing the desired parameter combinati...
def sync_experiments_from_spec(filename):
    """Sync the experiments stored in redis with a JSON spec file.

    ``filename`` points at a JSON document mapping experiment names to
    their lists of choices; the stored experiments are updated to match.
    """
    connection = oz.redis.create_connection()
    with open(filename, "r") as spec_file:
        spec = escape.json_decode(spec_file.read())
    oz.bandit.sync_from_spec(connection, spec)
Takes the path to a JSON file declaring experiment specifications, and modifies the experiments stored in redis to match the spec. A spec looks like this: { "experiment 1": ["choice 1", "choice 2", "choice 3"], "experiment 2": ["choice 1", "choice 2"] }
def sup_of_layouts(layout1, layout2):
    """Return the least layout compatible with layout1 and layout2.

    The shorter layout is treated as zero-padded to the longer one's
    length, and the element-wise maximum is returned.

    Args:
        layout1, layout2: lists of ints (order of arguments irrelevant;
            entries presumably non-negative since 0 is used for padding).

    Returns:
        list: element-wise maximum, as long as the longer input.
    """
    # Ensure layout1 is the shorter (or equal-length) one.
    if len(layout1) > len(layout2):
        layout1, layout2 = layout2, layout1
    if len(layout1) < len(layout2):
        # Build a NEW padded list with + rather than +=: the original
        # used +=, which mutated the caller's list object in place.
        layout1 = layout1 + [0] * (len(layout2) - len(layout1))
    # zip over equal-length lists works on both Python 2 and 3,
    # unlike the original xrange-based indexing.
    return [max(a, b) for a, b in zip(layout1, layout2)]
Return the least layout compatible with layout1 and layout2
def autoescape(context, nodelist, setting):
    """Force autoescape behaviour for this block.

    Renders ``nodelist`` with ``context.autoescape`` temporarily set to
    ``setting``, restoring the previous value afterwards — even if
    rendering raises, so a failing block cannot leak its autoescape
    state into the rest of the template.  When ``setting`` is truthy
    the rendered output is marked safe.
    """
    old_setting = context.autoescape
    context.autoescape = setting
    try:
        output = nodelist.render(context)
    finally:
        # Restore the caller's autoescape state even on render errors;
        # the original skipped restoration when render() raised.
        context.autoescape = old_setting
    return mark_safe(output) if setting else output
Force autoescape behaviour for this block.
def _fetch_chunker(self, uri, chunk_size, size, obj_size): pos = 0 total_bytes = 0 size = size or obj_size max_size = min(size, obj_size) while True: endpos = min(obj_size, pos + chunk_size - 1) headers = {"Range": "bytes=%s-%s" % (pos, endpos)} ...
Returns a generator that returns an object in chunks.
def incrementSub(self, amount=1):
    """Increments the sub-progress bar by ``amount``."""
    new_value = self.subValue() + amount
    self._subProgressBar.setValue(new_value)
    # Pump the Qt event loop so the bar repaints immediately.
    QApplication.instance().processEvents()
Increments the sub-progress bar by amount.
def terminate_jobflows(self, jobflow_ids):
    """Terminate an Elastic MapReduce job flow.

    :type jobflow_ids: list
    :param jobflow_ids: A list of job flow IDs to terminate.
    """
    request = {}
    # Expand the IDs into the JobFlowIds.member.N request parameters.
    self.build_list_params(request, jobflow_ids, 'JobFlowIds.member')
    return self.get_status('TerminateJobFlows', request, verb='POST')
Terminate an Elastic MapReduce job flow :type jobflow_ids: list :param jobflow_ids: A list of job flow IDs
def sessions_info(self, hosts): info_by_id = {} for server_endpoint, dump in self.dump_by_server(hosts).items(): server_ip, server_port = server_endpoint for line in dump.split("\n"): mat = self.IP_PORT_REGEX.match(line) if mat is None: ...
Returns ClientInfo per session. :param hosts: comma separated lists of members of the ZK ensemble. :returns: A dictionary of (session_id, ClientInfo).
def transform_flask_bare_import(node): new_names = [] for (name, as_name) in node.names: match = re.match(r'flask\.ext\.(.*)', name) from_name = match.group(1) actual_module_name = 'flask_{}'.format(from_name) new_names.append((actual_module_name, as_name)) new_node = nodes.I...
Translates a flask.ext.wtf bare import into a non-magical import. Translates: import flask.ext.admin as admin Into: import flask_admin as admin