code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _readXput(self, fileCards, directory, session, spatial=False, spatialReferenceID=4236, replaceParamFile=None):
    """Read supporting files referenced by project cards.

    For every project card whose name appears in ``fileCards`` with a
    truthy file-IO handler and a value accepted by
    ``self._noneOrNumValue``, strip the surrounding quotes from the
    card value and invoke the matching reader.

    NOTE(review): default spatialReferenceID 4236 looks like a typo for
    EPSG 4326 (WGS84) — kept as-is for compatibility; confirm upstream.
    """
    for projectCard in self.projectCards:
        handler = fileCards.get(projectCard.name)
        if not handler or not self._noneOrNumValue(projectCard.value):
            continue
        self._invokeRead(fileIO=handler,
                         directory=directory,
                         filename=projectCard.value.strip('"'),
                         session=session,
                         spatial=spatial,
                         spatialReferenceID=spatialReferenceID,
                         replaceParamFile=replaceParamFile)
GSSHAPY Project Read Files from File Method
def join(self):
    """Wait for this worker to complete its work in progress.

    Repeatedly joins every consumer's delay queue and then the shared
    work queue.  Because joining one queue may put new tasks on
    another, the loop only returns once a full pass finds no unfinished
    tasks anywhere; otherwise it starts over.  Useful when testing.
    """
    while True:
        for consumer in self.consumers.values():
            consumer.delay_queue.join()
        self.work_queue.join()
        for consumer in self.consumers.values():
            if consumer.delay_queue.unfinished_tasks:
                break  # a delay queue picked up new work; re-join everything
        else:
            if self.work_queue.unfinished_tasks:
                continue  # new work appeared on the work queue; loop again
            return
Wait for this worker to complete its work in progress. This method is useful when testing code.
def loads(self, param):
    """Recursively resolve received parameters (Python 2 module).

    ProxyRef instances are replaced by freshly looked-up proxies so
    that shared proxies are never queried concurrently; lists, tuples
    and dicts are walked recursively; anything else passes through
    unchanged.

    NOTE(review): uses the Python 2 print statement — this code is not
    Python 3 compatible as written.
    """
    if isinstance(param, ProxyRef):
        try:
            # Build a new proxy for the referenced actor (possibly remote).
            return self.lookup_url(param.url, param.klass, param.module)
        except HostError:
            print "Can't lookup for the actor received with the call. \
It does not exist or the url is unreachable.", param
            raise HostError(param)
    elif isinstance(param, list):
        return [self.loads(elem) for elem in param]
    elif isinstance(param, tuple):
        return tuple([self.loads(elem) for elem in param])
    elif isinstance(param, dict):
        # NOTE(review): mutates the received dict in place and returns it.
        new_dict = param
        for key in new_dict.keys():
            new_dict[key] = self.loads(new_dict[key])
        return new_dict
    else:
        return param
Checks the return parameters generating new proxy instances to avoid query concurrences from shared proxies and creating proxies for actors from another host.
def child(self, path):
    """Override the base implementation to inject the share ID our
    constructor was passed.

    If a share ID is set, splice it in as an extra path segment before
    ``path`` (one time only), then delegate to the base class.
    """
    if self._shareID is not None:
        # Rebind self to a URL that already contains the share segment,
        # then clear the ID on that new object so it is injected once.
        self = url.URL.child(self, self._shareID)
        self._shareID = None
    return url.URL.child(self, path)
Override the base implementation to inject the share ID our constructor was passed.
def remove_duplicate_faces(self):
    """Remove duplicate faces from the current mesh.

    Faces are compared by their sorted vertex indices, so winding
    order is ignored.  Alters ``self.faces`` in place via
    ``update_faces``.
    """
    sorted_faces = np.sort(self.faces, axis=1)
    unique_idx, _ = grouping.unique_rows(sorted_faces)
    self.update_faces(unique_idx)
On the current mesh remove any faces which are duplicates. Alters ---------- self.faces : removes duplicates
def cublasSsymm(handle, side, uplo, m, n, alpha, A, lda, B, ldb, beta, C, ldc):
    """Matrix-matrix product for a real single-precision symmetric matrix.

    Thin ctypes wrapper around ``cublasSsymm_v2``.  ``A``, ``B`` and
    ``C`` are device pointers passed as integers; ``alpha`` and
    ``beta`` are host floats wrapped as ``c_float`` and passed by
    reference.  ``cublasCheckStatus`` raises on a non-zero status.
    """
    status = _libcublas.cublasSsymm_v2(handle,
                                       _CUBLAS_SIDE_MODE[side],
                                       _CUBLAS_FILL_MODE[uplo],
                                       m, n,
                                       ctypes.byref(ctypes.c_float(alpha)),
                                       int(A), lda, int(B), ldb,
                                       ctypes.byref(ctypes.c_float(beta)),
                                       int(C), ldc)
    cublasCheckStatus(status)
Matrix-matrix product for symmetric matrix.
def main_production(self):
    """Return the production rule whose left side is the initial symbol.

    Raises:
        IndexError: if no such rule exists.
    """
    for production in self.productions:
        if production.leftside[0] == self._initialsymbol:
            return production
    raise IndexError
Returns main rule
def get_upper_triangle(correlation_matrix):
    """Extract the upper triangle (excluding the diagonal) of a square
    correlation matrix as a long-format dataframe.

    Negative correlations are clipped to 0.

    Args:
        correlation_matrix (pandas df): correlations between all replicates

    Returns:
        pandas df: columns [rid, corr] with the column id as the index,
        rounded to ``rounding_precision`` decimals.
    """
    # np.bool was removed in NumPy 1.24; the builtin bool is the correct
    # mask dtype and has always been equivalent here.
    mask = np.triu(np.ones(correlation_matrix.shape), k=1).astype(bool)
    upper_triangle = correlation_matrix.where(mask)
    upper_tri_df = upper_triangle.stack().reset_index(level=1)
    upper_tri_df.columns = ['rid', 'corr']
    upper_tri_df.reset_index(level=0, inplace=True)
    # Negative correlations are not meaningful downstream; clip to zero.
    upper_tri_df['corr'] = upper_tri_df['corr'].clip(lower=0)
    return upper_tri_df.round(rounding_precision)
Extract upper triangle from a square matrix. Negative values are set to 0. Args: correlation_matrix (pandas df): Correlations between all replicates Returns: upper_tri_df (pandas df): Upper triangle extracted from correlation_matrix; rid is the row index, cid is the column index, corr is the extracted correlation value
def labels(self):
    """Return the unique labels assigned to the documents.

    A label is simply the name of each subdirectory of ``self.root``.
    """
    root = self.root
    return [entry for entry in os.listdir(root)
            if os.path.isdir(os.path.join(root, entry))]
Return the unique labels assigned to the documents.
def _run_task(task, start_message, finished_message):
    """Take a task from tasks.py and run its commands on the server.

    Each item in ``task`` is a dict with an 'action' (dispatched to the
    module-level ``_<action>`` function), 'params', and an optional
    'message' printed before the action runs.
    """
    env.hosts = fabconf['EC2_INSTANCES']
    start = time.time()
    if env.hosts == []:
        print("There are EC2 instances defined in project_conf.py, please add some instances and try again")
        print("or run 'fab spawn_instance' to create an instance")
        return
    print(_yellow(start_message))
    for item in task:
        if 'message' in item:
            print(_yellow(item['message']))
        globals()["_" + item['action']](item['params'])
    elapsed = time.time() - start
    print(_yellow("%s in %.2fs" % (finished_message, elapsed)))
Takes a task from tasks.py and runs through its commands on the server
def asksaveasfile(mode="w", **options):
    """Ask for a filename to save as, and return the opened file
    (or None if the dialog was cancelled)."""
    filename = asksaveasfilename(**options)
    return open(filename, mode) if filename else None
Ask for a filename to save as, and return the opened file (or None if the dialog was cancelled)
def unique_(self, col):
    """Return the unique values in column ``col``, or ``None`` on failure.

    Errors are reported through ``self.err`` rather than raised.
    """
    try:
        deduped = self.df.drop_duplicates(subset=[col], inplace=False)
        return list(deduped[col])
    except Exception as e:
        self.err(e, "Can not select unique data")
Returns unique values in a column
def _validate_storage(storage, service_name, add_error): if storage is None: return if not isdict(storage): msg = 'service {} has invalid storage constraints {}'.format( service_name, storage) add_error(msg)
Lazily validate the storage constraints, ensuring that they are a dict. Use the given add_error callable to register validation error.
def cache_path(self):
    """Return (creating it if necessary) the directory used to store
    all caches, located at ``../cache`` relative to this file."""
    path = os.path.join(os.path.dirname(__file__), '..', 'cache')
    if not os.path.exists(path):
        os.mkdir(path)
    return path
make a directory to store all caches Returns: --------- cache path
def get_user_token():
    """Return the authenticated user's auth token, or '' if absent."""
    top = stack.top
    if not hasattr(top, 'current_user'):
        return ''
    return top.current_user.get('token', '')
Return the authenticated user's auth token
def zrank(self, name, value):
    """Return the rank of ``value`` in the sorted set stored at ``name``.

    :param name: str, the name of the redis key
    :param value: the element in the sorted set
    """
    with self.pipe as pipe:
        encoded = self.valueparse.encode(value)
        return pipe.zrank(self.redis_key(name), encoded)
Returns the rank of the element. :param name: str the name of the redis key :param value: the element in the sorted set
def escape(url):
    """Prepend the escape character ``|`` to ``url`` (idempotently).

    Windows paths are returned untouched.  ``salt://`` URLs have the
    escape applied to their path component; other schemes pass through
    unchanged.
    """
    if salt.utils.platform.is_windows():
        return url
    scheme = urlparse(url).scheme
    if not scheme:
        return url if url.startswith('|') else '|{0}'.format(url)
    if scheme == 'salt':
        path, saltenv = parse(url)
        if path.startswith('|'):
            return create(path, saltenv)
        return create('|{0}'.format(path), saltenv)
    return url
add escape character `|` to `url`
def set_destination_ip_responded(self, last_hop):
    """Set the ``destination_ip_responded`` flag if any packet in
    ``last_hop`` originated from the destination address."""
    if not self.destination_address:
        return
    if any(packet.origin and packet.origin == self.destination_address
           for packet in last_hop.packets):
        self.destination_ip_responded = True
Sets the flag if destination IP responded.
def pitch_shifter(self, chunk, shift):
    """Pitch-shift ``chunk`` by rotating its rFFT bins by ``shift``.

    A positive ``shift`` moves energy toward higher bins, a negative
    one toward lower bins.  Returns an array with the same dtype (and,
    for even-length input, the same length) as ``chunk``.
    """
    freq = numpy.fft.rfft(chunk)
    N = len(freq)
    shifted_freq = numpy.zeros(N, freq.dtype)
    # Normalise the shift to a non-negative bin offset.  It must be a
    # plain int: NumPy no longer accepts float slice indices, so the old
    # `numpy.round(..., 0)` (which returns a float) would raise.
    S = int(round(shift if shift > 0 else N + shift))
    s = N - S
    shifted_freq[:S] = freq[s:]
    shifted_freq[S:] = freq[:s]
    shifted_chunk = numpy.fft.irfft(shifted_freq)
    return shifted_chunk.astype(chunk.dtype)
Pitch-Shift the given chunk by shift semi-tones.
def generation(self):
    """Return the number of ancestors that are dictionaries."""
    if not self.parent:
        return 0
    parent_generation = self.parent.generation
    return parent_generation + 1 if self.parent.is_dict else parent_generation
Returns the number of ancestors that are dictionaries
def extract(self, reads_to_extract, database_fasta_file, output_file):
    """Extract ``reads_to_extract`` from ``database_fasta_file`` into
    ``output_file`` using fxtract, feeding the read IDs on stdin.

    NOTE(review): the file paths are interpolated into a shell command
    string — callers must not pass untrusted paths.

    Returns nothing.
    """
    cmd = "fxtract -XH -f /dev/stdin '%s' > %s" % (database_fasta_file,
                                                   output_file)
    extern.run(cmd, stdin='\n'.join(reads_to_extract))
Extract the reads_to_extract from the database_fasta_file and put them in output_file. Parameters ---------- reads_to_extract: Iterable of str IDs of reads to be extracted database_fasta_file: str path the fasta file that containing the reads output_file: str path to the file where they are put Returns ------- Nothing
def _parse_abbreviation(uri_link): abbr = re.sub(r'/[0-9]+\..*htm.*', '', uri_link('a').attr('href')) abbr = re.sub(r'/.*/schools/', '', abbr) abbr = re.sub(r'/teams/', '', abbr) return abbr.upper()
Returns a team's abbreviation. A school or team's abbreviation is generally embedded in a URI link which contains other relative link information. For example, the URI for the New England Patriots for the 2017 season is "/teams/nwe/2017.htm". This function strips all of the contents before and after "nwe" and converts it to uppercase and returns "NWE". Parameters ---------- uri_link : string A URI link which contains a team's abbreviation within other link contents. Returns ------- string The shortened uppercase abbreviation for a given team.
def isAuxilied(self):
    """Return whether the object is separating from and applying to a
    benefic (Venus or Jupiter) through a good aspect (0, 60 or 120)."""
    return self.__sepApp([const.VENUS, const.JUPITER],
                         aspList=[0, 60, 120])
Returns if the object is separating and applying to a benefic considering good aspects.
def from_json(cls, key):
    """Create an RFC 7517 JWK from its standard JSON representation.

    :param key: the RFC 7517 JSON text of a JWK.
    :raises InvalidJWKValue: if ``key`` is not valid JSON.
    """
    instance = cls()
    try:
        parsed = json_decode(key)
    except Exception as e:
        raise InvalidJWKValue(e)
    instance.import_key(**parsed)
    return instance
Creates a RFC 7517 JWK from the standard JSON format. :param key: The RFC 7517 representation of a JWK.
def dispatch(self, request, *args, **kwargs):
    """Construct the IdP server from ``settings.SAML_IDP_CONFIG`` before
    delegating to the normal dispatch; delegate errors to
    ``handle_error``."""
    idp_conf = IdPConfig()
    try:
        idp_conf.load(copy.deepcopy(settings.SAML_IDP_CONFIG))
        self.IDP = Server(config=idp_conf)
    except Exception as e:
        return self.handle_error(request, exception=e)
    return super(IdPHandlerViewMixin, self).dispatch(request, *args, **kwargs)
Construct IDP server with config from settings dict
def interpolate(self, traj, ti, k=3, der=0, ext=2):
    r"""Parametric B-spline interpolation in N dimensions.

    The first column of ``traj`` is the independent variable; the
    remaining columns are the data points used to build the B-spline
    representation, which is then evaluated at the points ``ti``.

    Parameters
    ----------
    traj : array_like (float)
        Solution trajectory providing the data points.
    ti : array_like (float)
        Values of the independent variable at which to interpolate.
    k : int, optional(default=3)
        Degree of the B-spline, :math:`1 \le k \le 5`.
    der : int, optional(default=0)
        Order of derivative of the spline to compute (<= ``k``).
    ext : int, optional(default=2)
        Behaviour outside the knot interval (2 raises ValueError).

    Returns
    -------
    interp_traj : ndarray (float)
        The interpolated trajectory, with ``ti`` as its first column.
    """
    u = traj[:, 0]
    n = traj.shape[1]
    x = [traj[:, i] for i in range(1, n)]
    tck, t = interpolate.splprep(x, u=u, k=k, s=0)
    out = interpolate.splev(ti, tck, der, ext)
    interp_traj = np.hstack((ti[:, np.newaxis], np.array(out).T))
    return interp_traj
r""" Parametric B-spline interpolation in N-dimensions. Parameters ---------- traj : array_like (float) Solution trajectory providing the data points for constructing the B-spline representation. ti : array_like (float) Array of values for the independent variable at which to interpolate the value of the B-spline. k : int, optional(default=3) Degree of the desired B-spline. Degree must satisfy :math:`1 \le k \le 5`. der : int, optional(default=0) The order of derivative of the spline to compute (must be less than or equal to `k`). ext : int, optional(default=2) Controls the value of returned elements for outside the original knot sequence provided by traj. For extrapolation, set `ext=0`; `ext=1` returns zero; `ext=2` raises a `ValueError`. Returns ------- interp_traj: ndarray (float) The interpolated trajectory.
def _merge_a_into_b_simple(self, a, b): for k, v in a.items(): b[k] = v return b
Merge config dictionary a into config dictionary b, clobbering the options in b whenever they are also specified in a. Do not do any checking.
def migrations_to_run(self):
    """Return the (number, func) migrations still to run, if any.

    Note this asserts that a migration record already exists for this
    class.
    """
    current = self.database_current_migration
    assert current is not None
    return [(number, func)
            for number, func in self.sorted_migrations
            if number > current]
Get a list of migrations to run still, if any. Note that this will fail if there's no migration record for this class!
def _read_header(self): self._header = self.cdmrf.fetch_header() self.load_from_stream(self._header)
Get the needed header information to initialize dataset.
def validate(self, instance, value):
    """Check that ``value`` is an open PNG file, a valid filename, or a
    ``png.Image``; return an open BytesIO of the image.

    The returned stream is tagged ``__valid__`` so repeated validation
    of the same object short-circuits.
    """
    if getattr(value, '__valid__', False):
        return value
    if isinstance(value, png.Image):
        pass  # already a PNG image object; no file-level checks needed
    else:
        # Let the base class coerce filenames to open files, then make
        # sure the stream really is a PNG.
        value = super(ImagePNG, self).validate(instance, value)
        try:
            png.Reader(value).validate_signature()
        except png.FormatError:
            self.error(instance, value, extra='Open file is not PNG.')
        value.seek(0)
    output = BytesIO()
    output.name = self.filename
    output.__valid__ = True
    if isinstance(value, png.Image):
        value.save(output)
    else:
        fid = value
        fid.seek(0)
        output.write(fid.read())
        fid.close()
    output.seek(0)
    return output
Checks if value is an open PNG file, valid filename, or png.Image Returns an open bytestream of the image
def read_json_file(path):
    """Read and return the data from the JSON file at the given path.

    Parameters:
        path (str): path to read

    Returns:
        dict or list: the parsed JSON content.
    """
    with open(path, 'r', encoding='utf-8') as handle:
        return json.load(handle)
Reads and return the data from the json file at the given path. Parameters: path (str): Path to read Returns: dict,list: The read json as dict/list.
def delete(self):
    """Delete this file-instance row and return self.

    Only the database record is removed — the caller is responsible
    for checking writability and for removing the actual disk file
    (normally via the Celery task).
    """
    query = self.query.filter_by(id=self.id)
    query.delete()
    return self
Delete a file instance. The file instance can be deleted if it has no references from other objects. The caller is responsible to test if the file instance is writable and that the disk file can actually be removed. .. note:: Normally you should use the Celery task to delete a file instance, as this method will not remove the file on disk.
def float_format(self, value):
    """Validate and set the floating point format code.

    Raises TypeError if ``value`` is not a string; formatting a probe
    number raises ValueError for an invalid format code before the
    value is stored on ``self._float_format``.
    """
    if not isinstance(value, str):
        raise TypeError('Floating point format code must be a string.')
    '{0:{1}}'.format(1.23, value)  # probe: raises ValueError if invalid
    self._float_format = value
Validate and set the floating point format code.
def resource_references(self, resource) -> Mapping[str, List[Any]]:
    """Resolve and return the reference resources pointed to by the
    fields of ``resource.props.references``.

    Returns a mapping from reference label to the list of resolved
    target resources.  Dangling references are expected to have been
    reported earlier in the build.
    """
    refs = resource.props.references
    return {
        label: [self.get_reference(label, target_label)
                for target_label in refs.get(label)]
        for label in refs
    }
Resolve and return reference resources pointed to by object Fields in resource.props can flag that they are references by using the references type. This method scans the model, finds any fields that are references, and returns the reference resources pointed to by those references. Note that we shouldn't get to the point of dangling references. Our custom Sphinx event should raise a references error during the build process (though maybe it is just a warning?)
def _get_alphanumeric_index(query_string): try: return [int(query_string), 'int'] except ValueError: if len(query_string) == 1: if query_string.isupper(): return [string.ascii_uppercase.index(query_string), 'char_hi'] elif query_string.islower(): return [string.ascii_lowercase.index(query_string), 'char_lo'] else: raise IOError('The input is a string longer than one character')
Given an input string of either int or char, returns what index in the alphabet and case it is :param query_string: str, query string :return: (int, str), list of the index and type
def insert_ordered(value, array):
    """Insert ``value`` into the sorted list ``array``, keeping it
    sorted, and return the index at which it was inserted.

    Equal values are inserted after existing ones (right bisection),
    matching the original linear scan, but the search is now
    O(log n) via the stdlib instead of always walking the whole list.
    """
    import bisect  # local import keeps this block self-contained
    index = bisect.bisect_right(array, value)
    array.insert(index, value)
    return index
This will insert the value into the array, keeping it sorted, and returning the index where it was inserted
def socketBinaryStream(self, hostname, port, length):
    """Create a TCP socket server DStream for binary input.

    .. warning:: Not part of the PySpark API.

    :param string hostname: Hostname of TCP server.
    :param int port: Port of TCP server.
    :param length: Message length in bytes, or a ``struct.unpack()``
        format string for variable-length messages.
    :rtype: DStream
    """
    binary_stream = TCPBinaryStream(length)
    binary_stream.listen(port, hostname)
    self._on_stop_cb.append(binary_stream.stop)
    return DStream(binary_stream, self, TCPDeserializer(self._context))
Create a TCP socket server for binary input. .. warning:: This is not part of the PySpark API. :param string hostname: Hostname of TCP server. :param int port: Port of TCP server. :param length: Message length. Length in bytes or a format string for ``struct.unpack()``. For variable length messages where the message length is sent right before the message itself, ``length`` is a format string that can be passed to ``struct.unpack()``. For example, use ``length='<I'`` for a little-endian (standard on x86) 32-bit unsigned int. :rtype: DStream
def get_live_scores(self, use_12_hour_format):
    """Fetch and display the currently live scores.

    Prints a message when nothing is live or when the request fails.
    """
    response = requests.get(RequestHandler.LIVE_URL)
    if response.status_code != requests.codes.ok:
        click.secho("There was problem getting live scores",
                    fg="red", bold=True)
        return
    games = response.json()["games"]
    if len(games) == 0:
        click.secho("No live action currently", fg="red", bold=True)
        return
    scores_data = []
    for game in games:
        scores_data.append({
            'homeTeam': {'name': game['homeTeamName']},
            'awayTeam': {'name': game['awayTeamName']},
            'score': {'fullTime': {'homeTeam': game['goalsHomeTeam'],
                                   'awayTeam': game['goalsAwayTeam']}},
            'league': game['league'],
            'time': game['time'],
        })
    self.writer.live_scores(scores_data)
Gets the live scores
def nose_selector(test):
    """Return the string to pass to nose to run ``test``, including
    argument values when the test came from a generator.

    Returns "Unknown test" when a decent path cannot be constructed.
    """
    address = test_address(test)
    if not address:
        return 'Unknown test'
    file, module, rest = address
    if not module:
        return 'Unknown test'
    if not rest:
        return module
    try:
        return '%s:%s%s' % (module, rest, test.test.arg or '')
    except AttributeError:
        return '%s:%s' % (module, rest)
Return the string you can pass to nose to run `test`, including argument values if the test was made by a test generator. Return "Unknown test" if it can't construct a decent path.
def _save_image(image, filename, return_img=None):
    """Internal helper for saving a NumPy image array.

    Relative string filenames are resolved against ``vtki.FIGURE_PATH``
    when that is set.  When ``return_img`` is falsy the result of
    ``imageio.imwrite`` is returned; otherwise the image itself.
    """
    if not image.size:
        raise Exception('Empty image. Have you run plot() first?')
    if isinstance(filename, str):
        if isinstance(vtki.FIGURE_PATH, str) and not os.path.isabs(filename):
            filename = os.path.join(vtki.FIGURE_PATH, filename)
        if not return_img:
            return imageio.imwrite(filename, image)
        imageio.imwrite(filename, image)
    return image
Internal helper for saving a NumPy image array
def brightness(sequence_number, brightness):
    """Create a brightness message.

    The 0.0-1.0 ``brightness`` value is scaled to a single 0-255 byte.
    """
    writer = MessageWriter().string("brightness")
    writer = writer.uint64(sequence_number)
    return writer.uint8(int(brightness * 255)).get()
Create a brightness message
def reset_state(self):
    """Reset connection-related attributes to their default values.

    Used when initialising a newly created SMTP instance and when
    closing an existing session, so the same instance can connect
    several times.
    """
    self.last_helo_response = (None, None)
    self.last_ehlo_response = (None, None)
    self.supports_esmtp = False
    self.esmtp_extensions = {}
    self.auth_mechanisms = []
    self.ssl_context = False
    for attr in ('reader', 'writer', 'transport'):
        setattr(self, attr, None)
Resets some attributes to their default values. This is especially useful when initializing a newly created :class:`SMTP` instance and when closing an existing SMTP session. It allows us to use the same SMTP instance and connect several times.
def get_redirect_location(self):
    """Should we redirect and where to?

    :returns: the ``location`` header value for a redirect status code
        (``None`` when the header is missing), or ``False`` when the
        status is not a redirect.
    """
    if self.status not in self.REDIRECT_STATUSES:
        return False
    return self.headers.get('location')
Should we redirect and where to? :returns: Truthy redirect location string if we got a redirect status code and valid location. ``None`` if redirect status and no location. ``False`` if not a redirect status code.
def process_next_position(data, cgi_input, header, reference, var_only):
    """Determine the appropriate processing for a var-file position,
    then convert the result to VCF lines.

    "Full position" lines (data[2] == 'all', or allele == '1' for a
    hemizygous single-allele line) carry all alleles and go through
    process_full_position.  Otherwise the position is split across
    several lines (one per allele) and process_split_position reads
    ahead through the remaining allele lines up to the next full
    position.  chrM records are dropped, and with var_only set, no-call
    lines ending in './.' are filtered out.  Returns a list of VCF
    lines, or None when there is nothing to emit.
    """
    if data[2] == "all" or data[1] == "1":
        out = process_full_position(data=data, header=header, var_only=var_only)
    else:
        # Split positions always report allele "1" first.
        assert data[2] == "1"
        out = process_split_position(data=data, cgi_input=cgi_input,
                                     header=header, reference=reference,
                                     var_only=var_only)
    if out:
        vcf_lines = [vcf_line(input_data=l, reference=reference)
                     for l in out if l['chrom'] != 'chrM']
        return [vl for vl in vcf_lines
                if not (var_only and vl.rstrip().endswith('./.'))]
Determine appropriate processing to get data, then convert it to VCF There are two types of lines in the var file: - "full position": single allele (hemizygous) or all-allele line All alleles at this position are represented in this line. This is handled with "process_full_position". - "split position": each of two alleles is reported separately. There will be at least two lines, one for each allele (but potentially more). This is handled with "process_split_position". Because the number of lines used for separately reported alleles is unknown, process_split_position will always read ahead to the next "full position" and return that as well. So the returned line formats are consistent, process_next_position returns an array, even if there's only one line.
def get_imported_modules(self, module):
    """Yield (name, module) pairs for every module imported by
    ``module``."""
    for member_name, member in inspect.getmembers(module, inspect.ismodule):
        yield member_name, member
Returns the list of modules imported from `module`.
def failed_update(self, exception):
    """Update cluster state given a failed MetadataRequest: fail any
    pending future and stamp the last refresh time."""
    with self._lock:
        pending = self._future
        if pending:
            self._future = None
    if pending:
        pending.failure(exception)
    self._last_refresh_ms = time.time() * 1000
Update cluster state given a failed MetadataRequest.
def clone(self, callable=None, **overrides):
    """Clone this Callable, optionally with new settings.

    Args:
        callable: New callable function to wrap
        **overrides: Parameter overrides to apply

    Returns:
        Cloned Callable object
    """
    inherited = {k: v for k, v in self.get_param_values()
                 if k not in ('callable', 'name')}
    inherited.update(overrides)
    wrapped = callable if callable is not None else self.callable
    return self.__class__(wrapped, **inherited)
Clones the Callable optionally with new settings Args: callable: New callable function to wrap **overrides: Parameter overrides to apply Returns: Cloned Callable object
def option_vip_by_environmentvip(self, environment_vip_id):
    """List Option VIPs by Environment VIP.

    :param environment_vip_id: id of the Environment VIP
    """
    uri = 'api/v3/option-vip/environment-vip/%s/' % environment_vip_id
    return super(ApiVipRequest, self).get(uri)
List Option Vip by Environment Vip param environment_vip_id: Id of Environment Vip
def process_file(self, path, dryrun):
    """Return the lines of ``path`` matching ``self.__exp``.

    In dry-run mode the path itself is returned; otherwise the matching
    lines are returned as a list, or ``None`` when nothing matches.
    """
    if dryrun:
        return path
    with open(path, "r") as infile:
        matches = [line for line in infile if re.search(self.__exp, line)]
    return matches if matches else None
Return the file's path in dry-run mode; otherwise return the file's lines matching the configured expression, or None if nothing matches.
def stop_recording(self):
    """Stop recording from the audio source.

    Signals the recording loop to stop, then stops the source and
    clears the recording flag while holding the source lock.
    """
    self._stop_recording.set()
    with self._source_lock:
        self._source.stop()
        self._recording = False
Stop recording from the audio source.
def save(self, key, kw):
    """Update the object associated with ``key`` using the ``kw``
    dictionary; works for LiteralAttrs objects and automatically
    flushes."""
    if key in self:
        obj = self[key]
    else:
        obj = hdf5.LiteralAttrs()
    vars(obj).update(kw)
    self[key] = obj
    self.flush()
Update the object associated to `key` with the `kw` dictionary; works for LiteralAttrs objects and automatically flushes.
def GetRouterForUser(self, username):
    """Return the first router whose ACL matches ``username``, falling
    back to the default router when none match."""
    for index, router in enumerate(self.routers):
        if self.auth_manager.CheckPermissions(username, str(index)):
            logging.debug("Matched router %s to user %s",
                          router.__class__.__name__, username)
            return router
    logging.debug("No router ACL rule match for user %s. Using default "
                  "router %s", username,
                  self.default_router.__class__.__name__)
    return self.default_router
Returns a router corresponding to a given username.
def uninstalled(name):
    """Ensure a Windows update is uninstalled from the minion.

    Args:
        name (str): Name of the Windows KB ("KB123456")

    Example:

    .. code-block:: yaml

        KB123456:
          wusa.uninstalled
    """
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    # Nothing to do if the KB is not installed.
    if not __salt__['wusa.is_installed'](name):
        ret['result'] = True
        ret['comment'] = '{0} already uninstalled'.format(name)
        return ret
    if __opts__['test'] is True:
        # The original assigned ret['result'] = None twice; once suffices.
        ret['result'] = None
        ret['comment'] = '{0} would be uninstalled'.format(name)
        return ret
    __salt__['wusa.uninstall'](name)
    if not __salt__['wusa.is_installed'](name):
        ret['comment'] = '{0} was uninstalled'.format(name)
        ret['changes'] = {'old': True, 'new': False}
        ret['result'] = True
    else:
        ret['comment'] = '{0} failed to uninstall'.format(name)
    return ret
Ensure an update is uninstalled from the minion Args: name(str): Name of the Windows KB ("KB123456") Example: .. code-block:: yaml KB123456: wusa.uninstalled
def set_num_special_tokens(self, num_special_tokens):
    """Update input embeddings with a new embedding matrix if the
    number of special tokens changed; previously learned base-vocab
    embeddings are preserved."""
    if self.config.n_special == num_special_tokens:
        return
    self.config.n_special = num_special_tokens
    old_embed = self.tokens_embed
    new_embed = nn.Embedding(self.config.total_tokens_embeddings,
                             self.config.n_embd)
    new_embed.to(old_embed.weight.device)
    self.tokens_embed = new_embed
    self.init_weights(self.tokens_embed)
    # Copy back the previously learned embeddings for the base vocabulary.
    vocab = self.config.vocab_size
    self.tokens_embed.weight.data[:vocab, :] = \
        old_embed.weight.data[:vocab, :]
Update input embeddings with new embedding matrice if needed
def _translate(unistr, table):
    """Replace characters in ``unistr`` using ``table`` (a mapping from
    character to replacement).

    Works on both Python 2 and 3: byte strings are decoded to unicode
    first on Python 2, and non-unicode input (on Python 2) is returned
    unchanged.
    """
    if type(unistr) is str:
        try:
            # Python 2: str is bytes and has .decode; on Python 3 str
            # has no .decode, so AttributeError leaves it as-is.
            unistr = unistr.decode('utf-8')
        except AttributeError:
            pass
    try:
        # On Python 3 `unicode` is undefined: NameError skips this
        # early-return so translation proceeds on the (unicode) str.
        if type(unistr) is not unicode:
            return unistr
    except NameError:
        pass
    chars = []
    for c in unistr:
        replacement = table.get(c)
        chars.append(replacement if replacement else c)
    return u''.join(chars)
Replace characters using a table.
def merge(self, ontologies):
    """Merge the specified ontologies into the current ontology.

    Copies every node (with its attribute dict) and every edge from
    each source graph into this ontology's graph, and merges xref
    graphs when the source has one.

    NOTE(review): ``srcg.node[n]`` is the pre-2.4 networkx attribute
    accessor (removed in networkx 2.4, where it became
    ``srcg.nodes[n]``) — confirm the pinned networkx version.
    """
    if self.xref_graph is None:
        self.xref_graph = nx.MultiGraph()
    logger.info("Merging source: {} xrefs: {}".format(self, len(self.xref_graph.edges())))
    for ont in ontologies:
        logger.info("Merging {} into {}".format(ont, self))
        g = self.get_graph()
        srcg = ont.get_graph()
        for n in srcg.nodes():
            g.add_node(n, **srcg.node[n])
        for (o, s, m) in srcg.edges(data=True):
            g.add_edge(o, s, **m)
        if ont.xref_graph is not None:
            for (o, s, m) in ont.xref_graph.edges(data=True):
                self.xref_graph.add_edge(o, s, **m)
Merges specified ontology into current ontology
def reference_source_point(self):
    """The location of the source in the reference image, in terms of
    the current image coordinates.

    Unwraps ``Quantity`` values before applying the offsets.  The old
    ``cond and x.value or x`` idiom silently fell back to the Quantity
    object whenever its value was 0 (falsy); explicit conditionals
    avoid that.
    """
    xref = self.xref.value if isinstance(self.xref, Quantity) else self.xref
    yref = self.yref.value if isinstance(self.yref, Quantity) else self.yref
    return xref + self.x_ref_offset, yref + self.y_ref_offset
The location of the source in the reference image, in terms of the current image coordinates.
def _add_ssh_key(ret):
    """Set up the salt-ssh minion to be accessed with the default
    salt-ssh key, preferring the user's ``~/.ssh/id_rsa`` when the
    ``ssh_use_home_key`` option is set."""
    home_key = os.path.expanduser('~/.ssh/id_rsa')
    if __opts__.get('ssh_use_home_key') and os.path.isfile(home_key):
        priv = home_key
    else:
        priv = __opts__.get(
            'ssh_priv',
            os.path.abspath(os.path.join(
                __opts__['pki_dir'], 'ssh', 'salt-ssh.rsa')))
    if priv and os.path.isfile(priv):
        ret['priv'] = priv
Setups the salt-ssh minion to be accessed with salt-ssh default key
def _get_time_override(self): if callable(self.time_override): time_override = self.time_override() else: time_override = self.time_override if not isinstance(time_override, datetime_time): raise ValueError( 'Invalid type. Must be a datetime.time instance.' ) return time_override
Retrieves the datetime.time or None from the `time_override` attribute.
def _get_proxy_info(self, _=None):
    """Generate a ProxyInfo whose socket is tunneled through the
    connected SSH transport.

    Args:
        _ (None): ignored; present only to satisfy the ProxyInfo spec.

    Returns:
        SSHTunnelProxyInfo: a ProxyInfo with an active tunneled socket.
    """
    host, port, path = self._endpoint_to_target(self._endpoint)
    if path:
        sock = self._ssh_tunnel.forward_unix(path=path)
    else:
        sock = self._ssh_tunnel.forward_tcp(host, port=port)
    return SSHTunnelProxyInfo(sock=sock)
Generate a ProxyInfo class from a connected SSH transport Args: _ (None): Ignored. This is just here as the ProxyInfo spec requires it. Returns: SSHTunnelProxyInfo: A ProxyInfo with an active socket tunneled through SSH
def run(self, realm, users):
    """Request a TGT in the name of each user and return the usernames
    that exist in the domain.

    realm: kerberos realm (domain name of the corp)
    users: list of usernames to test
    """
    existing_users = []
    for username in users:
        logging.debug('Probing user %s' % username)
        req = KerberosUserEnum.construct_tgt_req(realm, username)
        rep = self.ksoc.sendrecv(req.dump(), throw=False)
        if rep.name != 'KRB_ERROR':
            existing_users.append(username)
        elif rep.native['error-code'] == \
                KerberosErrorCode.KDC_ERR_PREAUTH_REQUIRED.value:
            # Pre-auth required still proves the account exists.
            existing_users.append(username)
    return existing_users
Requests a TGT in the name of the users specified in users. Returns a list of usernames that are in the domain. realm: kerberos realm (domain name of the corp) users: list : list of usernames to test
def is_executable(exe_name):
    """Check that ``exe_name`` is a valid executable on this system.

    A bare name is searched for on every PATH entry; a path is tested
    directly.

    Parameters
    ----------
    exe_name : str
        Executable name or path.

    Raises
    ------
    TypeError
        If ``exe_name`` is not a string.
    IOError
        If no matching executable is found.
    """
    if not isinstance(exe_name, str):
        raise TypeError('Executable name must be a string.')

    def is_exe(fpath):
        # Must exist as a regular file and carry the execute bit.
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

    fpath, fname = os.path.split(exe_name)
    if not fpath:
        # Generator instead of a materialized list: stops at first hit.
        res = any(is_exe(os.path.join(path, exe_name))
                  for path in os.environ["PATH"].split(os.pathsep))
    else:
        res = is_exe(exe_name)
    if not res:
        raise IOError('{} does not appear to be a valid executable on this '
                      'system.'.format(exe_name))
Check if Input is Executable This methid checks if the input executable exists. Parameters ---------- exe_name : str Executable name Returns ------- Bool result of test Raises ------ TypeError For invalid input type
def b64_hmac_md5(key, data):
    """Return the base64-encoded HMAC-MD5 of ``data`` under ``key``,
    with any trailing '=' padding stripped."""
    digest = hmac.new(key, data, _md5).digest()
    encoded = base64.b64encode(digest).strip().decode("utf-8")
    return re.sub('=+$', '', encoded)
return base64-encoded HMAC-MD5 for key and data, with trailing '=' stripped.
def check_venv(self):
    """Ensure we're inside a virtualenv; raise ClickException if not."""
    if self.zappa:
        venv = self.zappa.get_current_venv()
    else:
        venv = Zappa.get_current_venv()
    if venv:
        return
    raise ClickException(
        click.style("Zappa", bold=True) + " requires an " +
        click.style("active virtual environment", bold=True, fg="red") +
        "!\n" +
        "Learn more about virtual environments here: " +
        click.style("http://docs.python-guide.org/en/latest/dev/virtualenvs/",
                    bold=False, fg="cyan"))
Ensure we're inside a virtualenv.
def signal_terminate(on_terminate):
    """Install ``on_terminate`` as the handler for the common program
    termination signals (INT, QUIT, USR1, USR2, TERM)."""
    termination_signals = (signal.SIGINT, signal.SIGQUIT, signal.SIGUSR1,
                           signal.SIGUSR2, signal.SIGTERM)
    for sig in termination_signals:
        signal.signal(sig, on_terminate)
Install a handler for the common program-termination signals.
def endpoint_check(
        first, node_first, s, second, node_second, t, intersections
):
    r"""Check whether two curve endpoints coincide; if so, record it.

    Helper for tangent bbox intersection.  ``node_first`` and
    ``node_second`` are endpoint 2-vectors of the subdivided curves
    ``first`` and ``second``; ``s`` and ``t`` are the corresponding
    parameters (expected 0.0 or 1.0).  When the endpoints are
    numerically close, the parameters are mapped back to the original
    (pre-subdivision) curve intervals and appended to
    ``intersections``.
    """
    if _helpers.vector_close(node_first, node_second):
        # Map the subdivided-curve parameters back to the originals.
        orig_s = (1 - s) * first.start + s * first.end
        orig_t = (1 - t) * second.start + t * second.end
        add_intersection(orig_s, orig_t, intersections)
r"""Check if curve endpoints are identical. .. note:: This is a helper for :func:`tangent_bbox_intersection`. These functions are used (directly or indirectly) by :func:`_all_intersections` exclusively, and that function has a Fortran equivalent. Args: first (SubdividedCurve): First curve being intersected (assumed in :math:\mathbf{R}^2`). node_first (numpy.ndarray): 1D ``2``-array, one of the endpoints of ``first``. s (float): The parameter corresponding to ``node_first``, so expected to be one of ``0.0`` or ``1.0``. second (SubdividedCurve): Second curve being intersected (assumed in :math:\mathbf{R}^2`). node_second (numpy.ndarray): 1D ``2``-array, one of the endpoints of ``second``. t (float): The parameter corresponding to ``node_second``, so expected to be one of ``0.0`` or ``1.0``. intersections (list): A list of already encountered intersections. If these curves intersect at their tangency, then those intersections will be added to this list.
def calculate_moment(psd_f, psd_amp, fmin, fmax, f0, funct, norm=None,
                     vary_fmax=False, vary_density=None):
    """Calculate one of the integrals used to construct a template-bank
    placement metric.

    Computes sum of funct(x) * x**(-7/3) * delta_x / PSD(x) with
    x = f/f0 over the band (fmin, fmax), optionally divided by
    ``norm[f_cutoff]``.  With ``vary_fmax`` set, the partial sums are
    also evaluated at a ladder of upper cutoffs spaced by
    ``vary_density``.  Returns a dict keyed by upper cutoff frequency.
    """
    psd_x = psd_f / f0
    # Assumes evenly spaced frequencies (uniform delta_x).
    deltax = psd_x[1] - psd_x[0]
    # Restrict to the band of interest (exclusive bounds).
    mask = numpy.logical_and(psd_f > fmin, psd_f < fmax)
    psdf_red = psd_f[mask]
    comps_red = psd_x[mask] ** (-7./3.) * funct(psd_x[mask], f0) * deltax / \
        psd_amp[mask]
    moment = {}
    moment[fmax] = comps_red.sum()
    if norm:
        moment[fmax] = moment[fmax] / norm[fmax]
    if vary_fmax:
        for t_fmax in numpy.arange(fmin + vary_density, fmax, vary_density):
            moment[t_fmax] = comps_red[psdf_red < t_fmax].sum()
            if norm:
                moment[t_fmax] = moment[t_fmax] / norm[t_fmax]
    return moment
Function for calculating one of the integrals used to construct a template bank placement metric. The integral calculated will be \int funct(x) * (psd_x)**(-7./3.) * delta_x / PSD(x) where x = f / f0. The lower frequency cutoff is given by fmin, see the parameters below for details on how the upper frequency cutoff is chosen Parameters ----------- psd_f : numpy.array numpy array holding the set of evenly spaced frequencies used in the PSD psd_amp : numpy.array numpy array holding the PSD values corresponding to the psd_f frequencies fmin : float The lower frequency cutoff used in the calculation of the integrals used to obtain the metric. fmax : float The upper frequency cutoff used in the calculation of the integrals used to obtain the metric. This can be varied (see the vary_fmax option below). f0 : float This is an arbitrary scaling factor introduced to avoid the potential for numerical overflow when calculating this. Generally the default value (70) is safe here. **IMPORTANT, if you want to calculate the ethinca metric components later this MUST be set equal to f_low.** funct : Lambda function The function to use when computing the integral as described above. norm : Dictionary of floats If given then moment[f_cutoff] will be divided by norm[f_cutoff] vary_fmax : boolean, optional (default False) If set to False the metric and rotations are calculated once, for the full range of frequency [f_low,f_upper). If set to True the metric and rotations are calculated multiple times, for frequency ranges [f_low,f_low + i*vary_density), where i starts at 1 and runs up until f_low + (i+1)*vary_density > f_upper. Thus values greater than f_upper are *not* computed. The calculation for the full range [f_low,f_upper) is also done. vary_density : float, optional If vary_fmax is True, this will be used in computing the frequency ranges as described for vary_fmax. 
Returns -------- moment : Dictionary of floats moment[f_cutoff] will store the value of the moment at the frequency cutoff given by f_cutoff.
def pdf(self, mu): if self.transform is not None: mu = self.transform(mu) if mu < self.lower and self.lower is not None: return 0.0 elif mu > self.upper and self.upper is not None: return 0.0 else: return (1/float(self.sigma0))*np.exp(-(0.5*(mu-self.mu0)**2)/float(self.sigma0**2))
PDF for Truncated Normal prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - p(mu)
def on_directory_button_tool_clicked(self):
    """Autoconnect slot activated when the directory button is clicked.

    Suggests an output path next to the current layer's source file and
    writes the user's choice into the output form.
    """
    source_path = self.layer.currentLayer().source()
    input_directory, base_name = os.path.split(source_path)
    file_extension = os.path.splitext(base_name)[1]
    self.output_filename = os.path.splitext(base_name)[0]
    suggested_path = '%s_multi_buffer%s' % (
        os.path.join(input_directory, self.output_filename),
        file_extension)
    output_path, __ = QtWidgets.QFileDialog.getSaveFileName(
        self,
        self.tr('Output file'),
        suggested_path,
        'GeoJSON (*.geojson);;Shapefile (*.shp)')
    self.output_form.setText(output_path)
Autoconnect slot activated when directory button is clicked.
def cidr_netmask(cidr):
    """Get the netmask address associated with a CIDR address.

    CLI example::

        salt myminion netaddress.cidr_netmask 192.168.0.0/20
    """
    # stdlib ipaddress replaces the third-party netaddr/six dependency;
    # strict=False accepts host bits being set, matching netaddr.IPNetwork.
    import ipaddress
    network = ipaddress.ip_network(str(cidr), strict=False)
    return str(network.netmask)
Get the netmask address associated with a CIDR address. CLI example:: salt myminion netaddress.cidr_netmask 192.168.0.0/20
def parse_second_row(row, url):
    """Scrape torrent metadata from a torrent's second table row.

    Uses the `Parser.parse_torrent_properties` and
    `Parser.parse_torrent_link` helpers on the row's cells.

    :param lxml.HtmlElement row: row to parse
    :param url: Url object used to combine the base url with scraped links
    :return: category, subcategory, quality, language, user, user url,
        torrent link, size, comments, times completed, seeders and leechers
    :rtype: list
    """
    cells = row.findall('./td')
    category, subcategory, quality, language = \
        Parser.parse_torrent_properties(cells[0])
    uploader_anchor = cells[1].find('./a')
    uploader = uploader_anchor.text_content()
    uploader_url = url.combine(uploader_anchor.get('href'))
    torrent_link = Parser.parse_torrent_link(cells[2])
    return [
        category, subcategory, quality, language,
        uploader, uploader_url, torrent_link,
        cells[3].text,  # size
        cells[4].text,  # comments
        cells[5].text,  # times completed
        cells[6].text,  # seeders
        cells[7].text,  # leechers
    ]
Static method that parses a given table row element by using helper methods `Parser.parse_category_subcategory_and_or_quality`, `Parser.parse_torrent_link` and scrapping torrent's category, subcategory, quality, language, user, user url, torrent link, size, comments, times completed, seeders and leechers. Used specifically with a torrent's second table row. :param lxml.HtmlElement row: row to parse :param urls.Url url_instance: Url used to combine base url's with scrapped links from tr :return: scrapped category, subcategory, quality, language, user, user url, torrent link, size, comments, times completed, seeders and leechers :rtype: list
def get_prices(self) -> List[PriceModel]:
    """Return all available prices for the security, newest first."""
    from pricedb.dal import Price

    price_db = PriceDbApplication()
    repo = price_db.get_price_repository()
    return (
        repo.query(Price)
        .filter(Price.namespace == self.security.namespace)
        .filter(Price.symbol == self.security.mnemonic)
        .orderby_desc(Price.date)
        .all()
    )
Returns all available prices for security
def GetClientConfig(filename):
    """Write the client configuration (plus build YAML) to ``filename``."""
    config_lib.SetPlatformArchContext()
    config_lib.ParseConfigCommandLine()
    # build the config in the client context
    context = list(grr_config.CONFIG.context) + ["Client Context"]
    repacker = build.ClientRepacker()
    config_data = repacker.GetClientConfig(
        context, validate=True, deploy_timestamp=False)
    builder = build.ClientBuilder()
    with open(filename, "w") as fd:
        fd.write(config_data)
        builder.WriteBuildYaml(fd, build_timestamp=False)
Write client config to filename.
def _round_half_hour(record): k = record.datetime + timedelta(minutes=-(record.datetime.minute % 30)) return datetime(k.year, k.month, k.day, k.hour, k.minute, 0)
Round a time DOWN to half nearest half-hour.
def corpora(self, full=False):
    """Return the list of corpora owned by the user.

    When ``full`` is True, every page returned by the HTTP server is
    downloaded.
    """
    listing_url = self.base_url + self.CORPORA_PAGE
    return self._retrieve_resources(listing_url, Corpus, full)
Return list of corpora owned by user. If `full=True`, it'll download all pages returned by the HTTP server
def make(parser):
    """Provision Manila with HA.

    Registers the ``create_manila_db``, ``create_service_credentials`` and
    ``install`` subcommands on the given argparse parser.
    """
    subparsers = parser.add_subparsers(
        title='commands',
        metavar='COMMAND',
        help='description',
    )
    # The original wrapped each command in a one-line closure that only
    # forwarded ``args``; binding the command functions directly as the
    # ``func`` default is equivalent and removes the boilerplate.
    create_manila_db_parser = create_manila_db_subparser(subparsers)
    create_manila_db_parser.set_defaults(func=create_manila_db)

    create_service_credentials_parser = create_service_credentials_subparser(
        subparsers)
    create_service_credentials_parser.set_defaults(
        func=create_service_credentials)

    install_parser = install_subparser(subparsers)
    install_parser.set_defaults(func=install)
provison Manila with HA
def skip(type_name, filename):
    """Provide reporting statistics for a skipped file."""
    message = 'Skipping {} file: {}'.format(type_name, filename)
    return ReportStats(filename, report=[message])
Provide reporting statistics for a skipped file.
def component_offsetvectors(offsetvectors, n):
    """Return the shortest list of unique n-instrument offset vectors from
    which all vectors in ``offsetvectors`` can be constructed.

    Only *relative* offsets are significant: each n-instrument combination
    stores its offsets as deltas from the combination's first instrument,
    so vectors that differ by a constant shift collapse together.
    NOTE(review): that assumption does not hold for pipelines where
    absolute offsets matter (e.g. periodic ring wrapping of triggers).
    """
    # maps an instruments tuple --> set of delta tuples seen for it
    delta_sets = {}
    for vect in offsetvectors:
        # every n-way choice of the instruments present in this vector
        for instruments in iterutils.choices(sorted(vect), n):
            # record offsets relative to the combination's first instrument
            delta_sets.setdefault(instruments, set()).add(tuple(vect[instrument] - vect[instruments[0]] for instrument in instruments))
    # rebuild one offsetvector per unique (instruments, deltas) pair
    return [offsetvector(zip(instruments, deltas)) for instruments, delta_set in delta_sets.items() for deltas in delta_set]
Given an iterable of offset vectors, return the shortest list of the unique n-instrument offset vectors from which all the vectors in the input iterable can be constructed. This can be used to determine the minimal set of n-instrument coincs required to construct all of the coincs for all of the requested instrument and offset combinations in a set of offset vectors. It is assumed that the coincs for the vector {"H1": 0, "H2": 10, "L1": 20} can be constructed from the coincs for the vectors {"H1": 0, "H2": 10} and {"H2": 0, "L1": 10}, that is only the relative offsets are significant in determining if two events are coincident, not the absolute offsets. This assumption is not true for the standard inspiral pipeline, where the absolute offsets are significant due to the periodic wrapping of triggers around rings.
def analyse_text(text):
    """Recognize a JCL job by its header line; 1.0 on match, else 0.0."""
    score = 0.0
    lines = text.split('\n')
    # str.split always yields at least one element, but keep the guard
    # shape for clarity
    if lines and JclLexer._JOB_HEADER_PATTERN.match(lines[0]):
        score = 1.0
    assert 0.0 <= score <= 1.0
    return score
Recognize JCL job by header.
def uri_query(self):
    """Get the Uri-Query of a request.

    :return: the Uri-Query string ('&'-joined option values)
    :rtype: String
    """
    query_number = defines.OptionRegistry.URI_QUERY.number
    parts = [str(option.value) for option in self.options
             if option.number == query_number]
    return "&".join(parts)
Get the Uri-Query of a request. :return: the Uri-Query string, with values joined by '&' :rtype: String
def escape_filename_sh(name):
    """Return a hopefully safe shell-escaped version of a filename.

    Names containing control characters fall back to ANSI-C quoting via
    ``escape_filename_sh_ansic``; otherwise the name is wrapped in double
    quotes with shell-special characters backslash-escaped.
    """
    if any(ord(ch) < 32 for ch in name):
        return escape_filename_sh_ansic(name)
    # str.replace returns a new string; the original discarded every
    # result, so the name was returned completely unescaped.
    # Backslash must be escaped first to avoid double-escaping.
    for special in ('\\', '"', '`', '$'):
        name = name.replace(special, '\\' + special)
    return '"' + name + '"'
Return a hopefully safe shell-escaped version of a filename.
def parse_journal_file(journal_file):
    """Iterate over the journal's file, taking care of paddings.

    Yields parsed USN records; records that fail to parse are yielded as
    ``CorruptedUsnRecord`` placeholders carrying the record's index.
    """
    # running index of records emitted so far (shared by good and bad)
    counter = count()
    for block in read_next_block(journal_file):
        # strip NUL padding at the front of the block
        block = remove_nullchars(block)
        while len(block) > MIN_RECORD_SIZE:
            header = RECORD_HEADER.unpack_from(block)
            # first header field is the record's total length in bytes
            size = header[0]
            try:
                yield parse_record(header, block[:size])
                next(counter)
            except RuntimeError:
                # parse failure: emit a placeholder with the record index
                yield CorruptedUsnRecord(next(counter))
            finally:
                # drop the consumed record plus trailing NUL padding
                block = remove_nullchars(block[size:])
        # rewind so the file position sits at the unconsumed tail
        journal_file.seek(- len(block), 1)
Iterates over the journal's file taking care of paddings.
def https_proxy(self):
    """Return the https proxy to use for ivy.

    Prefers the HTTPS_PROXY / https_proxy environment variables and falls
    back to the configured option. Expects http://<host>:<port>.
    """
    for env_var in ('HTTPS_PROXY', 'https_proxy'):
        value = os.getenv(env_var)
        if value:
            return value
    return self.get_options().https_proxy
Set ivy to use an https proxy. Expects a string of the form http://<host>:<port>
def get(self, path_segment="", owner=None, app=None, sharing=None, **query):
    """Perform a GET on the path segment relative to this endpoint.

    Absolute segments (leading '/') are used verbatim; relative ones are
    resolved against this endpoint's path in the given namespace. All
    other keyword arguments become URL query parameters.

    :param path_segment: A path segment relative to this endpoint.
    :param owner: The owner context of the namespace (optional).
    :param app: The app context of the namespace (optional).
    :param sharing: The sharing mode for the namespace (optional).
    :param query: Remaining keyword arguments, used as query parameters.
    :return: The response from the server.
    """
    is_absolute = path_segment.startswith('/')
    path = path_segment if is_absolute else self.service._abspath(
        self.path + path_segment, owner=owner, app=app, sharing=sharing)
    return self.service.get(path, owner=owner, app=app, sharing=sharing,
                            **query)
Performs a GET operation on the path segment relative to this endpoint. This method is named to match the HTTP method. This method makes at least one roundtrip to the server, one additional round trip for each 303 status returned, plus at most two additional round trips if the ``autologin`` field of :func:`connect` is set to ``True``. If *owner*, *app*, and *sharing* are omitted, this method takes a default namespace from the :class:`Service` object for this :class:`Endpoint`. All other keyword arguments are included in the URL as query parameters. :raises AuthenticationError: Raised when the ``Service`` is not logged in. :raises HTTPError: Raised when an error in the request occurs. :param path_segment: A path segment relative to this endpoint. :type path_segment: ``string`` :param owner: The owner context of the namespace (optional). :type owner: ``string`` :param app: The app context of the namespace (optional). :type app: ``string`` :param sharing: The sharing mode for the namespace (optional). :type sharing: "global", "system", "app", or "user" :param query: All other keyword arguments, which are used as query parameters. :type query: ``string`` :return: The response from the server. :rtype: ``dict`` with keys ``body``, ``headers``, ``reason``, and ``status`` **Example**:: import splunklib.client s = client.service(...) apps = s.apps apps.get() == \\ {'body': ...a response reader object..., 'headers': [('content-length', '26208'), ('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'), ('server', 'Splunkd'), ('connection', 'close'), ('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'), ('date', 'Fri, 11 May 2012 16:30:35 GMT'), ('content-type', 'text/xml; charset=utf-8')], 'reason': 'OK', 'status': 200} apps.get('nonexistant/path') # raises HTTPError s.logout() apps.get() # raises AuthenticationError
def make_hop_info_from_url(url, verify_reachability=None):
    """Build a HopInfo object from a node url (telnet/ssh only).

    The url path component is treated as an additional (enable) password.
    Raises InvalidHopInfoError when the resulting HopInfo is not valid.
    """
    parsed = urlparse(url)
    username = unquote(parsed.username) if parsed.username is not None else None
    password = unquote(parsed.password) if parsed.password is not None else None
    enable_values = parse_qs(parsed.query).get("enable_password")
    enable_password = enable_values[0] if enable_values else None
    hop_info = HopInfo(
        parsed.scheme,
        parsed.hostname,
        username,
        password,
        parsed.port,
        enable_password,
        verify_reachability=verify_reachability
    )
    if not hop_info.is_valid():
        raise InvalidHopInfoError
    return hop_info
Build HopInfo object from url. It allows only telnet and ssh as a valid protocols. Args: url (str): The url string describing the node. i.e. telnet://username@1.1.1.1. The protocol, username and address portion of url is mandatory. Port and password is optional. If port is missing the standard protocol -> port mapping is done. The password is optional i.e. for TS access directly to console ports. The path part is treated as additional password required for some systems, i.e. enable password for IOS devices.: telnet://<username>:<password>@<host>:<port>/<enable_password> <enable_password> is optional verify_reachability: This is optional callable returning boolean if node is reachable. It can be used to verify reachability of the node before making a connection. It can speedup the connection process when node not reachable especially with telnet having long timeout. Returns: HopInfo object. Raises: InvalidHopInfoError: if the url is invalid or the protocol is not supported
def report_server_init_errors(address=None, port=None, **kwargs):
    """Context-manager body that prints friendlier messages when a
    ``Server`` cannot be started due to a network problem, then exits.

    Args:
        address (str): network address the server will listen on
        port (int): network port the server will listen on
    """
    try:
        yield
    except EnvironmentError as e:
        if e.errno == errno.EADDRINUSE:
            log.critical(
                "Cannot start Bokeh server, port %s is already in use", port)
        elif e.errno == errno.EADDRNOTAVAIL:
            log.critical(
                "Cannot start Bokeh server, address '%s' not available",
                address)
        else:
            log.critical(
                "Cannot start Bokeh server [%s]: %r",
                errno.errorcode[e.errno], e)
        sys.exit(1)
A context manager to help print more informative error messages when a ``Server`` cannot be started due to a network problem. Args: address (str) : network address that the server will be listening on port (int) : network address that the server will be listening on Example: .. code-block:: python with report_server_init_errors(**server_kwargs): server = Server(applications, **server_kwargs) If there are any errors (e.g. port or address in already in use) then a critical error will be logged and the process will terminate with a call to ``sys.exit(1)``
def computeDistortion(self, eEye, fU, fV):
    """Get the distortion function result for the given eye and input UVs.

    UVs run from (0,0) at the upper left to (1,1) at the lower right of the
    eye's viewport. Returns a (result, DistortionCoordinates_t) tuple.
    """
    coords = DistortionCoordinates_t()
    fn = self.function_table.computeDistortion
    result = fn(eEye, fU, fV, byref(coords))
    return result, coords
Gets the result of the distortion function for the specified eye and input UVs. UVs go from 0,0 in the upper left of that eye's viewport and 1,1 in the lower right of that eye's viewport. Returns a tuple of (success flag, distortion coordinates); the flag is true for success, otherwise false and the distortion coordinates are not suitable.
def _call_method_from_namespace(obj, method_name, namespace):
    """Call ``obj.method_name`` with arguments taken from the namespace.

    Args:
        obj: any kind of object
        method_name: name of the method to call
        namespace: argparse.Namespace with the parsed command line args
    """
    method = getattr(obj, method_name)
    arg_names = _get_args_name_from_parser(method.parser)
    # __init__ must be invoked through the class object itself
    target = obj if method_name == "__init__" else method
    return _call(target, arg_names, namespace)
Call the method, retrieved from obj, with the correct arguments via the namespace Args: obj: any kind of object method_name: method to be called namespace: an argparse.Namespace object containing parsed command line arguments
def _set_config(c):
    """Apply the GL configuration dict ``c`` as GLFW window hints."""
    hints = (
        (glfw.GLFW_RED_BITS, c['red_size']),
        (glfw.GLFW_GREEN_BITS, c['green_size']),
        (glfw.GLFW_BLUE_BITS, c['blue_size']),
        (glfw.GLFW_ALPHA_BITS, c['alpha_size']),
        # accumulation buffer is never requested
        (glfw.GLFW_ACCUM_RED_BITS, 0),
        (glfw.GLFW_ACCUM_GREEN_BITS, 0),
        (glfw.GLFW_ACCUM_BLUE_BITS, 0),
        (glfw.GLFW_ACCUM_ALPHA_BITS, 0),
        (glfw.GLFW_DEPTH_BITS, c['depth_size']),
        (glfw.GLFW_STENCIL_BITS, c['stencil_size']),
        (glfw.GLFW_SAMPLES, c['samples']),
        (glfw.GLFW_STEREO, c['stereo']),
    )
    for target, value in hints:
        glfw.glfwWindowHint(target, value)
    if not c['double_buffer']:
        raise RuntimeError('GLFW must double buffer, consider using a '
                           'different backend, or using double buffering')
Set gl configuration for GLFW
def lookup_zone_exception(self, callsign, timestamp=None):
    """Returns a CQ Zone if an exception exists for the given callsign.

    Args:
        callsign (string): Amateur radio callsign
        timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC);
            defaults to the current UTC time at *call* time.

    Raises:
        KeyError: No matching callsign found (or unsupported lookup type)
    """
    # A datetime default argument is evaluated once at import time, so
    # every call would reuse the module-load timestamp. Use a None
    # sentinel and compute "now" per call instead.
    if timestamp is None:
        timestamp = datetime.utcnow().replace(tzinfo=UTC)
    callsign = callsign.strip().upper()
    if self._lookuptype == "clublogxml":
        return self._check_zone_exception_for_date(
            callsign, timestamp,
            self._zone_exceptions, self._zone_exceptions_index)
    elif self._lookuptype == "redis":
        data_dict, index = self._get_dicts_from_redis(
            "_zone_ex_", "_zone_ex_index_", self._redis_prefix, callsign)
        return self._check_zone_exception_for_date(
            callsign, timestamp, data_dict, index)
    raise KeyError
Returns a CQ Zone if an exception exists for the given callsign Args: callsign (string): Amateur radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: int: Value of the the CQ Zone exception which exists for this callsign (at the given time) Raises: KeyError: No matching callsign found APIKeyMissingError: API Key for Clublog missing or incorrect Example: The following code checks the Clublog XML database if a CQ Zone exception exists for the callsign DP0GVN. >>> from pyhamtools import LookupLib >>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey") >>> print my_lookuplib.lookup_zone_exception("DP0GVN") 38 The prefix "DP" It is assigned to Germany, but the station is located in Antarctica, and therefore in CQ Zone 38 Note: This method is available for - clublogxml - redis
def readView(self, newLength=None):
    """Return a view of the next newLength bytes and skip past them.

    When newLength is None, the whole remaining length (self.len) is used.
    """
    length = self.len if newLength is None else newLength
    view = self.peekView(length)
    self.skip(length)
    return view
Return a view of the next newLength bytes, and skip it.
def error2str(e):
    """Return the formatted stacktrace of the exception ``e``.

    :param BaseException e: an exception to format into str
    :rtype: str
    """
    # traceback.format_exception already returns the formatted lines, so
    # the StringIO buffer and seek/read round-trip are unnecessary.
    return ''.join(traceback.format_exception(type(e), e, e.__traceback__))
returns the formatted stacktrace of the exception `e`. :param BaseException e: an exception to format into str :rtype: str
def db_restore(self, block_number=None):
    """Restore the database and clear the indexing lockfile.

    Restore to ``block_number`` if given; otherwise try the most recent
    valid backup.

    Returns:
        True on success; False if there is no state to restore.
        Exceptions other than AssertionError propagate.
    """
    restored = False
    if block_number is not None:
        # restore the specific requested backup
        try:
            self.backup_restore(block_number, self.impl, self.working_dir)
            restored = True
        except AssertionError:
            log.error("Failed to restore state from {}".format(block_number))
            return False
    else:
        # walk backups newest-first until one restores cleanly
        backup_blocks = self.get_backup_blocks(self.impl, self.working_dir)
        for block_number in reversed(sorted(backup_blocks)):
            try:
                self.backup_restore(block_number, self.impl, self.working_dir)
                restored = True
                log.debug("Restored state from {}".format(block_number))
                break
            except AssertionError:
                log.debug("Failed to restore state from {}".format(block_number))
                continue
        if not restored:
            # block numbers are ints; str() them first or the error report
            # itself raises "sequence item 0: expected str instance"
            log.error("Failed to restore state from {}".format(
                ','.join(str(b) for b in backup_blocks)))
            return False
    self.db_set_indexing(False, self.impl, self.working_dir)
    return self.db_setup()
Restore the database and clear the indexing lockfile. Restore to a given block if given; otherwise use the most recent valid backup. Return True on success Return False if there is no state to restore Raise exception on error
def get_assessment_query_session_for_bank(self, bank_id, proxy):
    """Gets the ``OsidSession`` for the assessment query service of a bank.

    arg:    bank_id (osid.id.Id): the ``Id`` of the bank
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.assessment.AssessmentQuerySession)
    raise:  Unimplemented - assessment query is not supported
    """
    if self.supports_assessment_query():
        return sessions.AssessmentQuerySession(bank_id, proxy, self._runtime)
    raise errors.Unimplemented()
Gets the ``OsidSession`` associated with the assessment query service for the given bank. arg: bank_id (osid.id.Id): the ``Id`` of the bank arg: proxy (osid.proxy.Proxy): a proxy return: (osid.assessment.AssessmentQuerySession) - ``an _assessment_query_session`` raise: NotFound - ``bank_id`` not found raise: NullArgument - ``bank_id`` or ``proxy`` is ``null`` raise: OperationFailed - ``unable to complete request`` raise: Unimplemented - ``supports_assessment_query()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_query()`` and ``supports_visible_federation()`` are ``true``.*
def raise_for_response(self, responses):
    """Construct the appropriate exception from the responses and raise it."""
    messages = [
        self.client.format_exception_message(response)
        for response in responses
    ]
    if len(messages) == 1:
        combined = messages[0]
    else:
        combined = "[%s]" % ", ".join(messages)
    raise PostmarkerException(combined)
Constructs appropriate exception from list of responses and raises it.
def _call_custom_creator(self, config):
    """Call a custom driver creator.

    :param config: The driver configuration
    :type config: dict
    :rtype: Repository
    """
    created = self._custom_creators[config['driver']](config)
    # a bare Store gets wrapped in a Repository automatically
    if isinstance(created, Store):
        created = self.repository(created)
    if not isinstance(created, Repository):
        raise RuntimeError('Custom creator should return a Repository instance.')
    return created
Call a custom driver creator. :param config: The driver configuration :type config: dict :rtype: Repository
def bucket(self, bucket_name, user_project=None):
    """Factory constructor for a bucket object owned by this client.

    No HTTP request is made; the bucket is merely instantiated.

    :type bucket_name: str
    :param bucket_name: The name of the bucket to be instantiated.
    :type user_project: str
    :param user_project: (Optional) project ID billed for API requests
        made via the bucket.
    :rtype: :class:`google.cloud.storage.bucket.Bucket`
    """
    return Bucket(
        client=self,
        name=bucket_name,
        user_project=user_project,
    )
Factory constructor for bucket object. .. note:: This will not make an HTTP request; it simply instantiates a bucket object owned by this client. :type bucket_name: str :param bucket_name: The name of the bucket to be instantiated. :type user_project: str :param user_project: (Optional) the project ID to be billed for API requests made via the bucket. :rtype: :class:`google.cloud.storage.bucket.Bucket` :returns: The bucket object created.
def retrieve_request(self, url):
    """Open the given url and return the decoded response body.

    url: The url to open. On failure a message is printed and the
    process exits with status 2 (original behavior, preserved).
    """
    try:
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; the context manager closes the
        # response handle once it has been read.
        with urlopen(url) as response:
            data = response.read()
    except Exception:
        print("Error Retrieving Data from Steam")
        sys.exit(2)
    return data.decode('utf-8')
Open the given url and decode and return the response url: The url to open.
def delete_file(self, instance, sender, **kwargs):
    """Delete the stored file when no other row still references it.

    Adds deletion of thumbnails and key value store references to the
    parent class implementation. Only called in Django < 1.2.5.
    """
    file_ = getattr(instance, self.attname)
    # any *other* row of this model pointing at the same file name
    query = Q(**{self.name: file_.name}) & ~Q(pk=instance.pk)
    qs = sender._default_manager.filter(query)
    # delete only when the file is set, is not the field default, and no
    # other row references it (``not qs`` executes the queryset)
    if (file_ and file_.name != self.default and not qs):
        default.backend.delete(file_)
    elif file_:
        # still referenced elsewhere (or default); just release the handle
        file_.close()
Adds deletion of thumbnails and key value store references to the parent class implementation. Only called in Django < 1.2.5
def load(self, model, value):
    """Convert unstructured ``value`` into structured ``model`` data,
    recursively, wrapping conversion failures in SerializationException."""
    try:
        structured = self._cattrs_converter.structure(value, model)
    except (ValueError, TypeError) as e:
        raise SerializationException(str(e))
    return structured
Converts unstructured data into structured data, recursively.