docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Scp a remote file to local Args: remote_path (str) local_path (str)
def scp_file_remote_to_local(self, remote_path, local_path): sshadd_command = [ 'ssh-add', '/Users/pyrat/.ssh/ubuntuNode' ] self.info_log( "executing command: %s" % ' '.join(sshadd_command) ) p = subprocess.Popen(sshadd_co...
1,054,908
generate background term from SNPs Args: vTot: variance of Yc+Yi vCommon: variance of Yc XX: kinship matrix a: common scales, it can be set for debugging purposes c: indipendent scales, it can be set for debugging purposes
def _genBgTerm_fromXX(self,vTot,vCommon,XX,a=None,c=None): vSpecific = vTot-vCommon SP.random.seed(0) if c==None: c = SP.randn(self.P) XX += 1e-3 * SP.eye(XX.shape[0]) L = LA.cholesky(XX,lower=True) # common effect R = self.genWeights(self.N,self.P) ...
1,055,177
Decorator for warning user of depricated functions before use. Args: newmethod (str): Name of method to use instead.
def depricated_name(newmethod): def decorator(func): @wraps(func) def wrapper(*args, **kwargs): warnings.simplefilter('always', DeprecationWarning) warnings.warn( "Function {} is depricated, please use {} instead.".format(func.__name__, newmethod), ...
1,055,274
load data file Args: cache_genotype: load genotypes fully into memory (default: False) cache_phenotype: load phentopyes fully intro memry (default: True)
def load(self,cache_genotype=False,cache_phenotype=True): self.f = h5py.File(self.file_name,'r') self.pheno = self.f['phenotype'] self.geno = self.f['genotype'] #TODO: load all row and column headers for genotype and phenotype #parse out thse we alwasy need for conveni...
1,055,395
sample a particular set of individuals (Irow) or phenotypes (Icol_pheno) or genotypes (Icol_geno) Args: Irow: indices for a set of individuals Icol_pheno: indices for a set of phenotypes Icol_geno: indices for a set of SNPs Returns...
def subSample(self,Irow=None,Icol_geno=None,Icol_pheno=None): C = copy.copy(self) if Irow is not None: C.genoM = C.genoM[Irow] C.phenoM = C.phenoM[Irow] C.sample_ID = C.sample_ID[Irow] if Icol_geno is not None: C.genoM = C.genoM[:,Icol_gen...
1,055,402
Parse the test config to a dictionary Args: test_config_string (str) this string come from the --test-config flag of the bro executable run command
def test_config_to_dict(test_config_string): test_config = {} if test_config_string: for config in test_config_string.split(','): key, value = config.split('=') test_config[key] = value return test_config
1,055,407
Parse the browser config and look for brome specific config Args: browser_config (dict)
def parse_brome_config_from_browser_config(browser_config): config = {} brome_keys = [key for key in browser_config if key.find(':') != -1] for brome_key in brome_keys: section, option = brome_key.split(':') value = browser_config[brome_key] if section not in config: ...
1,055,408
Calculate digest of a readable object Args: origin -- a readable object for which calculate digest algorithn -- the algorithm to use. See ``hashlib.algorithms_available`` for supported algorithms. block_size -- the size of the block to read at each iteration
def calc_digest(origin, algorithm="sha1", block_size=None): try: hashM = hashlib.new(algorithm) except ValueError: raise ValueError('hash algorithm not supported by the underlying platform: "{0}"'.format(algorithm)) while True: chunk = origin.read(block_size) if block_size else...
1,055,621
Returns all possible suffixes of an array (lazy evaluated) Args: arr: input array Returns: Array of all possible suffixes (as tuples)
def get_suffixes(arr): arr = tuple(arr) return [arr] return (arr[i:] for i in range(len(arr)))
1,055,749
Randomly choses an item according to defined weights Args: item_probabilities: list of (item, probability)-tuples Returns: random item according to the given weights
def weighted_choice(item_probabilities): probability_sum = sum(x[1] for x in item_probabilities) assert probability_sum > 0 random_value = random.random() * probability_sum summed_probability = 0 for item, value in item_probabilities: summed_probability += value if summed_probab...
1,055,750
Convenience method to sample from this distribution. Args: size (int or tuple): Shape of return value. Each element is drawn independently from this distribution.
def rvs(self, size=1): return np.random.multivariate_normal(self.mean, self.cov, size)
1,055,802
calculate digest for the given file or readable/seekable object Args: origin -- could be the path of a file or a readable/seekable object ( fileobject, stream, stringIO...) Returns: String rapresenting the digest for the given origin
def _calc_digest(self, origin): if hasattr(origin, 'read') and hasattr(origin, 'seek'): pos = origin.tell() digest = hashtools.calc_digest(origin, algorithm=self._conf['hash_alg']) origin.seek(pos) else: digest = hashtools.calc_file_digest(origin,...
1,056,062
Make folders recursively for the given path and check read and write permission on the path Args: path -- path to the leaf folder
def _makedirs(self, path): try: oldmask = os.umask(0) os.makedirs(path, self._conf['dmode']) os.umask(oldmask) except OSError as e: if(e.errno == errno.EACCES): raise Exception('not sufficent permissions to write on fsdb folder: "{...
1,056,065
Add new element to fsdb. Args: origin -- could be the path of a file or a readable/seekable object ( fileobject, stream, stringIO...) Returns: String rapresenting the digest of the file
def add(self, origin): digest = self._calc_digest(origin) if self.exists(digest): self.logger.debug('Added File: [{0}] ( Already exists. Skipping transfer)'.format(digest)) return digest absPath = self.get_file_path(digest) absFolderPath = os.path.dirn...
1,056,066
Remove an existing file from fsdb. File with the given digest will be removed from fsdb and the directory tree will be cleaned (remove empty folders) Args: digest -- digest of the file to remove
def remove(self, digest): # remove file absPath = self.get_file_path(digest) os.remove(absPath) # clean directory tree tmpPath = os.path.dirname(absPath) while tmpPath != self.fsdbRoot: if os.path.islink(tmpPath): raise Exception('fsd...
1,056,067
Retrieve the absolute path to the file with the given digest Args: digest -- digest of the file Returns: String rapresenting the absolute path of the file
def get_file_path(self, digest): relPath = Fsdb.generate_tree_path(digest, self._conf['depth']) return os.path.join(self.fsdbRoot, relPath)
1,056,069
Check the integrity of the file with the given digest Args: digest -- digest of the file to check Returns: True if the file is not corrupted
def check(self, digest): path = self.get_file_path(digest) if self._calc_digest(path) != digest: self.logger.warning("found corrupted file: '{0}'".format(path)) return False return True
1,056,070
Generate a relative path from the given fileDigest relative path has a numbers of directories levels according to @depth Args: fileDigest -- digest for which the relative path will be generate depth -- number of levels to use in relative path generation Returns: ...
def generate_tree_path(fileDigest, depth): if(depth < 0): raise Exception("depth level can not be negative") if(os.path.split(fileDigest)[1] != fileDigest): raise Exception("fileDigest cannot contain path separator") # calculate min length for the given depth (2...
1,056,075
estimate power for a given allele frequency, effect size beta and sample size N Assumption: z-score = beta_ML distributed as p(0) = N(0,1.0(maf*(1-maf)*N))) under the null hypothesis the actual beta_ML is distributed as p(alt) = N( beta , 1.0/(maf*(1-maf)N) ) Arguments: maf: minor allele frequency of the ...
def power(maf=0.5,beta=0.1, N=100, cutoff=5e-8): assert maf>=0.0 and maf<=0.5, "maf needs to be between 0.0 and 0.5, got %f" % maf if beta<0.0: beta=-beta std_beta = 1.0/np.sqrt(N*(2.0 * maf*(1.0-maf))) non_centrality = beta beta_samples = np.random.normal(loc=non_centrality, scale=std_beta) n_grid = 1000...
1,056,242
Convert a size value in bytes to its equivalent in IEC notation. See `<http://physics.nist.gov/cuu/Units/binary.html>`_. Parameters: size (int): Number of bytes. compact (bool): If ``True``, the result contains no spaces. Return: String representation of ``...
def bytes2iec(size, compact=False): postfn = lambda text: text.replace(' ', '') if compact else text if size < 0: raise ValueError("Negative byte size value {}".format(size)) if size < 1024: return postfn('{:4d} bytes'.format(size)) scaled = size for iec_unit in IEC_UNITS[1:]: ...
1,056,295
Convert a size specification, optionally containing a scaling unit in IEC notation, to a number of bytes. Parameters: size_spec (str): Number, optionally followed by a unit. only_positive (bool): Allow only positive values? Return: Numeric bytes size. ...
def iec2bytes(size_spec, only_positive=True): scale = 1 try: size = int(0 + size_spec) # return numeric values as-is except (TypeError, ValueError): spec = size_spec.strip().lower() for exp, iec_unit in enumerate(IEC_UNITS[1:], 1): iec_unit = iec_unit.lower() ...
1,056,296
Merge adjacent numbers in an iterable of numbers. Parameters: numbers (list): List of integers or numeric strings. indicator (str): Delimiter to indicate generated ranges. base (int): Passed to the `int()` conversion when comparing numbers. Return: list ...
def merge_adjacent(numbers, indicator='..', base=0): integers = list(sorted([(int("%s" % i, base), i) for i in numbers])) idx = 0 result = [] while idx < len(numbers): end = idx + 1 while end < len(numbers) and integers[end-1][0] == integers[end][0] - 1: end += 1 ...
1,056,297
Generates sentences from a given corpus Args: generation_type: 'markov' | 'hmm' | 'hmm_past' Returns: Properly formatted string of generated sentences
def generate_text(self, generation_type='markov'): assert generation_type in ['markov', 'hmm', 'hmm_past'] if generation_type == "markov": return self._text_generator(next_token=self._generate_next_token) elif generation_type == "hmm": return self._text_generator...
1,056,317
Execute a command Args: command (str) Returns: process (object)
def execute_command(self, command): self.runner.info_log("Executing command: %s" % command) process = Popen( command, stdout=open(os.devnull, 'w'), stderr=open('runner.log', 'a'), ) return process
1,056,328
Calculate how many padding bytes needed for ``fmt`` to be aligned to ``align``. Args: fmt (str): :mod:`struct` format. align (int): alignment (2, 4, 8, etc.) Returns: str: padding format (e.g., various number of 'x'). >>> calc_padding('b', 2) 'x' >>> calc_padding('b',...
def calc_padding(fmt, align): remain = struct.calcsize(fmt) % align if remain == 0: return "" return 'x' * (align - remain)
1,056,372
Align ``offset`` up to ``align`` boundary. Args: offset (int): value to be aligned. align (int): alignment boundary. Returns: int: aligned offset. >>> align_up(3, 2) 4 >>> align_up(3, 1) 3
def align_up(offset, align): remain = offset % align if remain == 0: return offset else: return offset + (align - remain)
1,056,373
Convert 6 bytes into a MAC string. Args: bin (str): hex string of lenth 6. Returns: str: String representation of the MAC address in lower case. Raises: Exception: if ``len(bin)`` is not 6.
def bin_to_mac(bin, size=6): if len(bin) != size: raise Exception("Invalid MAC address: %s" % (bin)) return ':'.join([binascii.hexlify(o) for o in bin])
1,056,374
Register an event with all servers. Args: direction (str): `in`, `out`, `both`, or `girc`. verb (str): Event name, `all`, or `raw`. child_fn (function): Handler function. priority (int): Handler priority (lower priority executes first). Note: `all` will ...
def register_event(self, direction, verb, child_fn, priority=10): event_managers = [] if direction in ('in', 'both'): event_managers.append(self._events_in) if direction in ('out', 'both'): event_managers.append(self._events_out) if direction == 'girc': ...
1,056,499
Sets user info for this server, to be used before connection. Args: nick (str): Nickname to use. user (str): Username to use. real (str): Realname to use.
def set_user_info(self, nick, user='*', real='*'): if self.connected: raise Exception("Can't set user info now, we're already connected!") # server will pickup list when they exist if not self.connected: self.nick = nick self.connect_info['user'] = { ...
1,056,500
Connects to the given server. Args: auto_reconnect (bool): Automatically reconnect on disconnection. Other arguments to this function are as usually supplied to :meth:`asyncio.BaseEventLoop.create_connection`.
def connect(self, *args, auto_reconnect=False, **kwargs): connection_info = { 'auto_reconnect': auto_reconnect, 'args': args, 'kwargs': kwargs, } self.connect_info['connection'] = connection_info # confirm we have user info set if 'us...
1,056,505
Authenticate to a server using SASL plain, or does so on connection. Args: name (str): Name to auth with. password (str): Password to auth with. identity (str): Identity to auth with (defaults to name).
def sasl_plain(self, name, password, identity=None): if identity is None: identity = name self.sasl('plain', name, password, identity)
1,056,532
Measure a list of states with a measurement matrix in the presence of measurement noise. Args: states (array): states to measure. Shape is NxSTATE_DIM. measurement_matrix (array): Each state in *states* is measured with this matrix. Should be MEAS_DIMxSTATE_DIM in shape. mea...
def measure_states(states, measurement_matrix, measurement_covariance): # Sanitise input measurement_matrix = np.atleast_2d(measurement_matrix) measurement_covariance = np.atleast_2d(measurement_covariance) measurement_dim = measurement_matrix.shape[0] if measurement_covariance.shape != (measur...
1,056,552
Generate states by simulating a linear system with constant process matrix and process noise covariance. Args: state_count (int): Number of states to generate. process_matrix (array): Square array process_covariance (array): Square array specifying process noise covariance. ...
def generate_states(state_count, process_matrix, process_covariance, initial_state=None): # Sanitise input process_matrix = np.atleast_2d(process_matrix) process_covariance = np.atleast_2d(process_covariance) state_dim = process_matrix.shape[0] if process_matrix.shape != (s...
1,056,553
Samples an observation's value. Args: value: A numeric value signifying the value to be sampled.
def observe(self, value): self._buffer.append(value) if len(self._buffer) == _BUFFER_SIZE: self._flush()
1,056,657
Retrieves the value estimate for the requested quantile rank. The requested quantile rank must be registered in the estimator's invariants a priori! Args: rank: A floating point quantile rank along the interval [0, 1]. Returns: A numeric value for the quantile ...
def query(self, rank): self._flush() current = self._head if not current: return 0 mid_rank = math.floor(rank * self._observations) max_rank = mid_rank + math.floor( self._invariant(mid_rank, self._observations) / 2) rank = 0.0 wh...
1,056,658
Disconnect all servers with a message. Args: message (str): Quit message to use on each connection.
def shutdown(self, message=None): for name, server in self.servers.items(): server.quit(message)
1,056,751
Create an IRC server connection slot. The server will actually be connected to when :meth:`girc.client.ServerConnection.connect` is called later. Args: server_name (str): Name of the server, to be used for functions and accessing the server later through the reactor...
def create_server(self, server_name, *args, **kwargs): server = ServerConnection(name=server_name, reactor=self) if args or kwargs: server.set_connect_info(*args, **kwargs) # register cached events for verb, infos in self._event_handlers.items(): for in...
1,056,752
Register an event with all servers. Args: direction (str): `in`, `out`, `both`, `raw`. verb (str): Event name. child_fn (function): Handler function. priority (int): Handler priority (lower priority executes first).
def register_event(self, direction, verb, child_fn, priority=10): if verb not in self._event_handlers: self._event_handlers[verb] = [] self._event_handlers[verb].append({ 'handler': child_fn, 'direction': direction, 'priority': priority, ...
1,056,755
Set phenotype matrix Args: Y: phenotype matrix [N, P] standardize: if True, phenotype is standardized (zero mean, unit variance)
def setY(self,Y,standardize=False): assert Y.shape[0]==self.N, 'CVarianceDecomposition:: Incompatible shape' assert Y.shape[1]==self.P, 'CVarianceDecomposition:: Incompatible shape' if standardize: Y=preprocess.standardize(Y) #check that missing values mat...
1,056,935
add random effects term for single trait models (no trait-trait covariance matrix) Args: K: NxN sample covariance matrix is_noise: bool labeling the noise term (noise term has K=eye) normalize: if True, K and Ks are scales such that K.diagonal().mean()==1 ...
def addSingleTraitTerm(self,K=None,is_noise=False,normalize=True,Ks=None): assert self.P == 1, 'Incompatible number of traits' assert K!=None or is_noise, 'Specify covariance structure' if is_noise: assert self.noisPos==None, 'noise term already ex...
1,056,937
add fixed effect to the model Args: F: fixed effect matrix [N,1] A: design matrix [K,P] (e.g. SP.ones((1,P)) common effect; SP.eye(P) any effect)
def addFixedEffect(self,F=None,A=None): if A==None: A = SP.eye(self.P) if F==None: F = SP.ones((self.N,1)) assert A.shape[1]==self.P, 'Incompatible shape' assert F.shape[0]==self.N, 'Incompatible shape' if F.shape[1]>1: ...
1,056,939
Initialize GP objetct Args: fast: if fast==True initialize gpkronSum gp
def initGP(self,fast=False): if fast: assert self.n_terms==2, 'CVarianceDecomposition: for fast inference number of terms must be == 2' assert self.P>1, 'CVarianceDecomposition: for fast inference number of traits must be > 1' self.vd.initGPkronSum() e...
1,056,940
Uses 2 term single trait model to get covar params for initialization Args: termx: non-noise term terms that is used for initialization
def _getScalesDiag(self,termx=0): assert self.P>1, 'CVarianceDecomposition:: diagonal init_method allowed only for multi trait models' assert self.noisPos!=None, 'CVarianceDecomposition:: noise term has to be set' assert termx<self.n_terms-1, 'CVarianceDecomposition:: termx>=n_terms-1'...
1,056,941
Train the gp Args: fast: if true and the gp has not been initialized, initializes a kronSum gp scales0: initial variance components params fixed0: initial fixed effect params
def trainGP(self,fast=False,scales0=None,fixed0=None,lambd=None): assert self.n_terms>0, 'CVarianceDecomposition:: No variance component terms' if not self.init: self.initGP(fast=fast) # set lambda if lambd!=None: self.gp.setLambda(lambd) # set scales0 if sc...
1,056,944
Train the model repeadly up to a number specified by the users with random restarts and return a list of all relative minima that have been found Args: fast: Boolean. if set to True initalize kronSumGP verbose: Boolean. If set to True, verbose output is produce...
def findLocalOptima(self,fast=False,verbose=True,n_times=10,lambd=None): if not self.init: self.initGP(fast) opt_list = [] fixed0 = SP.zeros_like(self.gp.getParams()['dataTerm']) # minimises n_times for i in range(n_times): s...
1,056,946
get random initialization of variances based on the empirical trait variance Args: scales: if scales==None: set them randomly, else: set scales to term_num (if term_num==None: set to all terms) term_num: set scales to term_num
def setScales(self,scales=None,term_num=None): if scales==None: for term_i in range(self.n_terms): n_scales = self.vd.getTerm(term_i).getNumberScales() self.vd.getTerm(term_i).setScales(SP.array(SP.randn(n_scales))) elif term_num==None: as...
1,056,947
Returns the Parameters Args: term_i: index of the term we are interested in if term_i==None returns the whole vector of parameters
def getScales(self,term_i=None): if term_i==None: RV = self.vd.getScales() else: assert term_i<self.n_terms, 'Term index non valid' RV = self.vd.getScales(term_i) return RV
1,056,948
Returns explicitly the estimated trait covariance matrix Args: term_i: index of the term we are interested in
def getEstTraitCovar(self,term_i=None): assert self.P>1, 'Trait covars not defined for single trait analysis' if term_i==None: RV=SP.zeros((self.P,self.P)) for term_i in range(self.n_terms): RV+=self.vd.getTerm(term_i).getTraitCovar().K() else: ...
1,056,949
Returns the estimated trait correlation matrix Args: term_i: index of the term we are interested in
def getEstTraitCorrCoef(self,term_i=None): cov = self.getEstTraitCovar(term_i) stds=SP.sqrt(cov.diagonal())[:,SP.newaxis] RV = cov/stds/stds.T return RV
1,056,950
Set the kernel for predictions Args: term_i: index of the term we are interested in Ks: (TODO: is this the covariance between train and test or the covariance between test points?)
def setKstar(self,term_i,Ks): assert Ks.shape[0]==self.N #if Kss!=None: #assert Kss.shape[0]==Ks.shape[1] #assert Kss.shape[1]==Ks.shape[1] self.vd.getTerm(term_i).getKcf().setK0cross(Ks)
1,056,959
Registers a given index: * Creates and opens an index for it (if it doesn't exist yet) * Sets some default values on it (unless they're already set) Args: index (PonyWhoosh.Index): An instance of PonyWhoosh.Index class
def register_index(self, index): self._indexes[index._name] = index self.create_index(index) return index
1,057,132
Registers a single model for fulltext search. This basically creates a simple PonyWhoosh.Index for the model and calls self.register_index on it. Args: *fields: all the fields indexed from the model. **kw: The options for each field, sortedby, stored ...
def register_model(self, *fields, **kw): index = PonyWhooshIndex(pw=self) index._kw = kw index._fields = fields def inner(model): index._name = model._table_ if not index._name: index._name = model.__name__ self._entities[index._name] = model ...
1,057,133
Function to check if a address is unicast and that the CIDR mask is good Args: ip_addr_and_mask: Unicast IP address and mask in the following format 192.168.1.1/24 return_tuple: Set to True it returns a IP and mask in a tuple, set to False returns True or False Returns: see return_tuple for ret...
def ucast_ip_mask(ip_addr_and_mask, return_tuple=True): regex_ucast_ip_and_mask = __re.compile("^((22[0-3])|(2[0-1][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-...
1,057,141
Function to check if a address is unicast Args: ip_addr: Unicast IP address in the following format 192.168.1.1 return_tuple: Set to True it returns a IP, set to False returns True or False Returns: see return_tuple for return options
def ucast_ip(ip_addr, return_tuple=True): regex_ucast_ip = __re.compile("^((22[0-3])|(2[0-1][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))$") if return_t...
1,057,142
Function to check if a address is multicast and that the CIDR mask is good Args: ip_addr_and_mask: Multicast IP address and mask in the following format 239.1.1.1/24 return_tuple: Set to True it returns a IP and mask in a tuple, set to False returns True or False Returns: see return_tuple for r...
def mcast_ip_mask(ip_addr_and_mask, return_tuple=True): regex_mcast_ip_and_mask = __re.compile("^(((2[2-3][4-9])|(23[0-3]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))/((3[0-2])|([1-2][0-9])...
1,057,143
Function to check if a address is multicast Args: ip_addr: Multicast IP address in the following format 239.1.1.1 return_tuple: Set to True it returns a IP, set to False returns True or False Returns: see return_tuple for return options
def mcast_ip(ip_addr, return_tuple=True): regex_mcast_ip = __re.compile("^(((2[2-3][4-9])|(23[0-3]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9])))$") if return_tuple: while not re...
1,057,144
Function to check if a address and CIDR mask is good Args: ip_addr_and_mask: IP address and mask in the following format 192.168.1.1/24 return_tuple: Set to True it returns a IP and mask in a tuple, set to False returns True or False Returns: see return_tuple for return options
def ip_mask(ip_addr_and_mask, return_tuple=True): regex_ip_and_mask = __re.compile("^((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))/((3[0-2...
1,057,145
Function to check if a address is good Args: ip_addr: IP address in the following format 192.168.1.1 return_tuple: Set to True it returns a IP, set to False returns True or False Returns: see return_tuple for return options
def ip(ip_addr, return_tuple=True): regex_ip = __re.compile("^((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))$") if return_tuple: ...
1,057,146
Function to verify a good CIDR value Args: cidr: CIDR value 0 to 32 return_cidr: Set to True it returns a CIDR value, set to False returns True or False Returns: see return_cidr for return options
def cidr_check(cidr, return_cidr=True): try: if int(cidr) < 0 or int(cidr) > 32: good_cidr = False else: good_cidr = True if return_cidr: while not good_cidr: print("Sorry the CIDR value %s is not a valid value must be a value of 0 to ...
1,057,147
Function to figure out the IP's between neighbors address Args: ip_addr: Unicast IP address in the following format 192.168.1.1 cidr: CIDR value of 30, or 31 Returns: returns Our IP and the Neighbor IP in a tuple
def get_neighbor_ip(ip_addr, cidr="30"): our_octet = None neighbor_octet = None try: ip_addr_split = ip_addr.split(".") max_counter = 0 if int(cidr) == 30: ranger = 4 elif int(cidr) == 31: ranger = 2 while max_counter < 256: tr...
1,057,148
Function to return a whole subnet value from a IP address and CIDR pair Args: ip_addr: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1 cidr: CIDR value of 0 to 32 Returns: returns the corrected whole subnet
def whole_subnet_maker(ip_addr, cidr): if ucast_ip(ip_addr, False) == False and mcast_ip(ip_addr, False) == False: LOGGER.critical('Function whole_subnet_maker ip_addr {item}'.format(item=ip_addr)) raise ValueError("Not a good ipv4 address") if not cidr_check(cidr, False): LOGGER.cr...
1,057,149
Function to return a subnet range value from a IP address and CIDR pair Args: ip_net: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1 cidr: CIDR value of 1 to 32 Returns: returns a dictionary of info
def subnet_range(ip_net, cidr): subnets_dict = dict() subnet = whole_subnet_maker(ip_net, cidr) subnets_dict['IP'] = ip_net subnets_dict['NET'] = subnet subnets_dict['CIDR'] = '%s/%s' % (whole_subnet_maker(ip_net, cidr), cidr) if int(cidr) >= 24: subnet_split = subnet.split('.') ...
1,057,150
Function to return every subnet a ip can belong to with a longer prefix Args: ip_net: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1 cidr: CIDR value of 0 to 32 Returns: returns a list of subnets
def all_subnets_longer_prefix(ip_net, cidr): subnets_list = list() while int(cidr) <= 32: try: subnets_list.append('%s/%s' % (whole_subnet_maker(ip_net, cidr), cidr)) except Exception as e: LOGGER.critical('Function all_subnets_longer_prefix {item}'.format(item=e)) ...
1,057,151
Function to return every subnet a ip can belong to with a shorter prefix Args: ip_net: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1 cidr: CIDR value of 0 to 32 include_default: If you want the list to inlclude the default route set to True Ret...
def all_subnets_shorter_prefix(ip_net, cidr, include_default=False): subnets_list = list() if include_default: while int(cidr) >= 0: try: subnets_list.append('%s/%s' % (whole_subnet_maker(ip_net, cidr), cidr)) except Exception as e: LOGGER.cri...
1,057,152
Function to verify item entered is a number Args: check: Thing to check for a number return_number: Set to True it returns a number value, set to False returns True or False Returns: Check return_number for return options
def number_check(check, return_number=True): try: int(check) good = True except ValueError: LOGGER.critical('Function number_check ValueError {item}'.format(item=check)) good = False if return_number: while not good: print("That is not a number.") ...
1,057,154
Determine if a file is a packet trace that is supported by this module. Args: path (str): path to the trace file. Returns: bool: True if the file is a valid packet trace.
def is_packet_trace(path): path = os.path.abspath(path) if not os.path.isfile(path): return False try: f = open(path, 'rb') except: return False magic = f.read(4) f.close() return magic in FILE_TYPE_HANDLER
1,057,197
Read a packet trace file, return a :class:`wltrace.common.WlTrace` object. This function first reads the file's magic (first ``FILE_TYPE_HANDLER`` bytes), and automatically determine the file type, and call appropriate handler to process the file. Args: path (str): the file's path to be loaded...
def load_trace(path, *args, **kwargs): with open(path, 'rb') as f: magic = f.read(MAGIC_LEN) if magic not in FILE_TYPE_HANDLER: raise Exception('Unknown file magic: %s' % (binascii.hexlify(magic))) return FILE_TYPE_HANDLER[magic](path, *args, **kwargs)
1,057,198
Extended pretty printing for location strings. Args: format_spec str: Coordinate formatting system to use Returns: Human readable string representation of ``Point`` object Raises: ValueError: Unknown value for ``format_spec``
def __format__(self, format_spec='dd'): text = super(Station.__base__, self).__format__(format_spec) if self.alt_id: return '%s (%s - %s)' % (self.name, self.alt_id, text) else: return '%s (%s)' % (self.name, text)
1,057,252
Pull locations from a user's config file. Args: filename (str): Config file to parse Returns: dict: List of locations from config file
def read_locations(filename): data = ConfigParser() if filename == '-': data.read_file(sys.stdin) else: data.read(filename) if not data.sections(): logging.debug('Config file is empty') locations = {} for name in data.sections(): if data.has_option(name, 'lo...
1,057,333
Pull locations from a user's CSV file. Read gpsbabel_'s CSV output format .. _gpsbabel: http://www.gpsbabel.org/ Args: filename (str): CSV file to parse Returns: tuple of dict and list: List of locations as ``str`` objects
def read_csv(filename): field_names = ('latitude', 'longitude', 'name') data = utils.prepare_csv_read(filename, field_names, skipinitialspace=True) locations = {} args = [] for index, row in enumerate(data, 1): name = '%02i:%s' % (index, row['name']) locations[name] = (row['lati...
1,057,334
Initialise a new ``LocationsError`` object. Args: function (str): Function where error is raised data (tuple): Location number and data
def __init__(self, function=None, data=None): super(LocationsError, self).__init__() self.function = function self.data = data
1,057,336
Initialise a new ``NumberedPoint`` object. Args: latitude (float): Location's latitude longitude (float): Location's longitude name (str): Location's name or command line position units (str): Unit type to be used for distances
def __init__(self, latitude, longitude, name, units='km'): super(NumberedPoint, self).__init__(latitude, longitude, units) self.name = name
1,057,338
Returns a transformed Geometry. Arguments: geom -- any coercible Geometry value or Envelope to_sref -- SpatialReference or EPSG ID as int
def transform(geom, to_sref): # If we have an envelope, assume it's in the target sref. try: geom = getattr(geom, 'polygon', Envelope(geom).polygon) except (TypeError, ValueError): pass else: geom.AssignSpatialReference(to_sref) try: geom_sref = geom.GetSpatialRe...
1,057,592
Creates an envelope from lower-left and upper-right coordinates. Arguments: args -- min_x, min_y, max_x, max_y or a four-tuple
def __init__(self, *args): if len(args) == 1: args = args[0] try: extent = list(map(float, args)) except (TypeError, ValueError) as exc: exc.args = ('Cannot create Envelope from "%s"' % repr(args),) raise try: self.min_...
1,057,594
Expands this envelope by the given Envelope or tuple. Arguments: other -- Envelope, two-tuple, or four-tuple
def expand(self, other):
    """Expand this envelope in place to cover the given extent.

    Args:
        other: Envelope, two-tuple, or four-tuple of coordinates
    """
    if len(other) == 2:
        # A bare point: double it up into a degenerate (min, max) extent.
        other += other
    half = len(other) // 2
    lows, highs = other[:half], other[half:]
    self.ll = map(min, self.ll, lows)
    self.ur = map(max, self.ur, highs)
1,057,597
Returns true if this envelope intersects another. Arguments: other -- Envelope or tuple of (minX, minY, maxX, maxY)
def intersects(self, other): try: return (self.min_x <= other.max_x and self.max_x >= other.min_x and self.min_y <= other.max_y and self.max_y >= other.min_y) except AttributeError: return self.intersects(Envelo...
1,057,599
Returns a new envelope rescaled from center by the given factor(s). Arguments: xfactor -- int or float X scaling factor yfactor -- int or float Y scaling factor
def scale(self, xfactor, yfactor=None):
    """Return a new envelope rescaled from the centre by the given factor(s).

    Args:
        xfactor (int or float): X scaling factor
        yfactor (int or float): Y scaling factor, defaults to ``xfactor``

    Returns:
        Envelope: New envelope scaled about this envelope's centroid
    """
    if yfactor is None:
        yfactor = xfactor
    cx, cy = self.centroid
    # Half-extents of the scaled envelope (0.5 multiply is exact in floats).
    half_w = 0.5 * self.width * xfactor
    half_h = 0.5 * self.height * yfactor
    return Envelope(cx - half_w, cy - half_h, cx + half_w, cy + half_h)
1,057,600
desc: > Provides information about the exceptions that a function or method can raise. args: - name: desc desc: > Provides a description of what conditions will cause this exception to be raised type: s...
def __init__(self, desc, type):
    """Describe an exception a function or method can raise.

    Args:
        desc (str): Conditions that will cause this exception to be raised
        type (str): The exception's type
    """
    # type_assert validates each argument is a str before storing it.
    self.type = type_assert(type, str)
    self.desc = type_assert(desc, str)
1,057,677
Build managed property interface. Args: attr (str): Property's name Returns: property: Managed property interface
def _manage_location(attr): return property(lambda self: getattr(self, '_%s' % attr), lambda self, value: self._set_location(attr, value))
1,057,766
Generate a human readable DM/DMS location string. Args: latitude (float): Location's latitude longitude (float): Location's longitude mode (str): Coordinate formatting system to use unistr (bool): Whether to use extended character set
def _dms_formatter(latitude, longitude, mode, unistr=False): if unistr: chars = ('°', '′', '″') else: chars = ('°', "'", '"') latitude_dms = tuple(map(abs, utils.to_dms(latitude, mode))) longitude_dms = tuple(map(abs, utils.to_dms(longitude, mode))) text = [] if mode == 'dm...
1,057,767
Initialise a new ``TimedPoint`` object. Args: latitude (float, tuple or list): Location's latitude longitude (float, tuple or list): Location's longitude angle (str): Type for specified angles units (str): Units type to be used for distances timezone ...
def __init__(self, latitude, longitude, units='metric', angle='degrees',
             timezone=0, time=None):
    """Initialise a new ``TimedPoint`` object.

    Args:
        latitude (float, tuple or list): Location's latitude
        longitude (float, tuple or list): Location's longitude
        units (str): Units type to be used for distances
        angle (str): Type for specified angles
        timezone (int): Offset from UTC in minutes
        time: Time associated with the location
    """
    # The parent class handles all positional state; only time is new here.
    super(TimedPoint, self).__init__(latitude, longitude, units, angle,
                                     timezone)
    self.time = time
1,057,768
Recursively marshal a Python object to a BSON-compatible dict that can be passed to PyMongo, Motor, etc... Args: obj: object, It's members can be nested Python objects which will be converted to dictionaries types: tuple-of-types, The BSON primitive types, typically ...
def marshal_bson(obj, types=BSON_TYPES, fields=None):
    """Recursively marshal a Python object to a BSON-compatible dict.

    The result can be passed to PyMongo, Motor, etc.

    Args:
        obj: Object whose members (possibly nested) are converted to dicts
        types (tuple of type): The BSON primitive types
        fields: Optional field selection forwarded to ``marshal_dict``

    Returns:
        dict: BSON-compatible dictionary representation of ``obj``
    """
    # Thin wrapper: marshal_dict does the actual recursive conversion.
    return marshal_dict(obj, types, fields=fields)
1,058,285
Yields tile (x, y, z) tuples for a bounding box and zoom levels. Arguments: bbox - bounding box as a 4-length sequence zlevs - sequence of tile zoom levels
def from_bbox(bbox, zlevs):
    """Yield tile (x, y, z) tuples covering a bounding box at each zoom.

    Args:
        bbox: Bounding box as a 4-length sequence
        zlevs: Sequence of tile zoom levels
    """
    env = Envelope(bbox)
    for zoom in zlevs:
        # Tile coordinates of the two opposite corners at this zoom level.
        upper_left = to_tile(*env.ul + (zoom,))
        lower_right = to_tile(*env.lr + (zoom,))
        xs, ys = [range(lo, hi + 1)
                  for lo, hi in zip(upper_left, lower_right)]
        for tile in itertools.product(xs, ys, (zoom,)):
            yield tile
1,058,442
Returns a tuple of (longitude, latitude) from a map tile xyz coordinate. See http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Lon..2Flat._to_tile_numbers_2 Arguments: xtile - x tile location as int or float ytile - y tile location as int or float zoom - zoom level as int or float
def to_lonlat(xtile, ytile, zoom): n = 2.0 ** zoom lon = xtile / n * 360.0 - 180.0 # Caculate latitude in radians and convert to degrees constrained from -90 # to 90. Values too big for tile coordinate pairs are invalid and could # overflow. try: lat_rad = math.atan(math.sinh(math.p...
1,058,443
Returns a tuple of (xtile, ytile) from a (longitude, latitude) coordinate. See http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames Arguments: lon - longitude as int or float lat - latitude as int or float zoom - zoom level as int or float
def to_tile(lon, lat, zoom):
    """Return the (xtile, ytile) containing a (longitude, latitude) point.

    See http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames

    Args:
        lon (int or float): Longitude
        lat (int or float): Latitude
        zoom (int or float): Zoom level

    Returns:
        tuple of int: (xtile, ytile) coordinate pair
    """
    tiles = 2.0 ** zoom
    phi = math.radians(lat)
    x = int((lon + 180.0) / 360.0 * tiles)
    # Spherical-mercator projection: ln(tan(phi) + sec(phi)).
    mercator = math.log(math.tan(phi) + (1 / math.cos(phi)))
    y = int((1.0 - mercator / math.pi) / 2.0 * tiles)
    return x, y
1,058,444
Initialise a new ``_GpxElem`` object. Args: latitude (float): Element's latitude longitude (float): Element's longitude name (str): Name for Element description (str): Element's description elevation (float): Element's elevation time (util...
def __init__(self, latitude, longitude, name=None, description=None,
             elevation=None, time=None):
    """Initialise a new ``_GpxElem`` object.

    Args:
        latitude (float): Element's latitude
        longitude (float): Element's longitude
        name (str): Name for element
        description (str): Element's description
        elevation (float): Element's elevation
        time: Time associated with the element
    """
    super(_GpxElem, self).__init__(latitude, longitude, time=time)
    self.elevation = elevation
    self.description = description
    self.name = name
1,058,881
Calculate distances between locations in segments. Args: method (str): Method used to calculate distance Returns: list of list of float: Groups of distance between points in segments
def distance(self, method='haversine'):
    """Calculate distances between locations in each segment.

    Args:
        method (str): Method used to calculate distance

    Returns:
        list of list of float: Distances between points, grouped by
            segment; an empty group for any segment with fewer than two
            points
    """
    # A segment needs at least two points before a distance exists.
    return [segment.distance(method) if len(segment) >= 2 else []
            for segment in self]
1,058,885
Calculate bearing between locations in segments. Args: format (str): Format of the bearing string to return Returns: list of list of float: Groups of bearings between points in segments
def bearing(self, format='numeric'):
    """Calculate bearings between locations in each segment.

    Args:
        format (str): Format of the bearing string to return

    Returns:
        list of list of float: Bearings between points, grouped by
            segment; an empty group for any segment with fewer than two
            points
    """
    # A segment needs at least two points before a bearing exists.
    return [segment.bearing(format) if len(segment) >= 2 else []
            for segment in self]
1,058,886
Calculate final bearing between locations in segments. Args: format (str): Format of the bearing string to return Returns: list of list of float: Groups of bearings between points in segments
def final_bearing(self, format='numeric'):
    """Calculate final bearings between locations in each segment.

    Args:
        format (str): Format of the bearing string to return

    Returns:
        list of list of float: Final bearings between points, grouped by
            segment; an empty group for any segment with fewer than two
            points
    """
    # A segment needs at least two points before a bearing exists.
    return [segment.final_bearing(format) if len(segment) >= 2 else []
            for segment in self]
1,058,887
Test whether locations are within a given range of ``location``. Args: location (Point): Location to test range against distance (float): Distance to test location is within Returns: list of list of Point: Groups of points in range per segment
def range(self, location, distance):
    """Test whether locations are within a given range of ``location``.

    Args:
        location (Point): Location to test range against
        distance (float): Distance to test location is within

    Returns:
        generator: Groups of points in range, one group per segment,
            produced lazily
    """
    for segment in self:
        yield segment.range(location, distance)
1,058,890
Calculate destination locations for given distance and bearings. Args: bearing (float): Bearing to move on in degrees distance (float): Distance in kilometres Returns: list of list of Point: Groups of points shifted by ``distance`` and ``bearing``
def destination(self, bearing, distance):
    """Calculate destination locations for a given distance and bearing.

    Args:
        bearing (float): Bearing to move on in degrees
        distance (float): Distance in kilometres

    Returns:
        generator: Groups of shifted points, one group per segment,
            produced lazily
    """
    for segment in self:
        yield segment.destination(bearing, distance)
1,058,891
Calculate sunrise times for locations. Args: date (datetime.date): Calculate rise or set for given date zenith (str): Calculate sunrise events, or end of twilight Returns: list of list of datetime.datetime: The time for the sunrise for each point in e...
def sunrise(self, date=None, zenith=None):
    """Calculate sunrise times for locations.

    Args:
        date (datetime.date): Calculate rise for the given date
        zenith (str): Calculate sunrise events, or end of twilight

    Returns:
        generator: Sunrise times, one group per segment, produced lazily
    """
    for segment in self:
        yield segment.sunrise(date, zenith)
1,058,892
Calculate sunset times for locations. Args: date (datetime.date): Calculate rise or set for given date zenith (str): Calculate sunset events, or start of twilight times Returns: list of list of datetime.datetime: The time for the sunset for each poin...
def sunset(self, date=None, zenith=None):
    """Calculate sunset times for locations.

    Args:
        date (datetime.date): Calculate set for the given date
        zenith (str): Calculate sunset events, or start of twilight

    Returns:
        generator: Sunset times, one group per segment, produced lazily
    """
    for segment in self:
        yield segment.sunset(date, zenith)
1,058,893
Calculate sunrise/sunset times for locations. Args: date (datetime.date): Calculate rise or set for given date zenith (str): Calculate rise/set events, or twilight times Returns: list of list of 2-tuple of datetime.datetime: The time for the sunrise ...
def sun_events(self, date=None, zenith=None):
    """Calculate sunrise/sunset times for locations.

    Args:
        date (datetime.date): Calculate rise or set for the given date
        zenith (str): Calculate rise/set events, or twilight times

    Returns:
        generator: (sunrise, sunset) pairs, one group per segment,
            produced lazily
    """
    for segment in self:
        yield segment.sun_events(date, zenith)
1,058,894
Import information from GPX metadata. Args: elements (etree.Element): GPX metadata subtree
def import_metadata(self, elements): metadata_elem = lambda name: etree.QName(GPX_NS, name) for child in elements.getchildren(): tag_ns, tag_name = child.tag[1:].split('}') if not tag_ns == GPX_NS: continue if tag_name in ('name', 'desc', 'ke...
1,058,897
Helper function to simplify ``__repr__`` methods. Args: obj: Object to pull argument values for remap (dict): Argument pairs to remap before output Returns: str: Self-documenting representation of ``value``
def repr_assist(obj, remap=None): if not remap: remap = {} data = [] for arg in inspect.getargspec(getattr(obj.__class__, '__init__'))[0]: if arg == 'self': continue elif arg in remap: value = remap[arg] else: try: valu...
1,058,918
Prepare various input types for parsing. Args: data (iter): Data to read method (str): Method to process data with mode (str): Custom mode to process with, if data is a file Returns: list: List suitable for parsing Raises: TypeError: Invalid value for data
def prepare_read(data, method='readlines', mode='r'): if hasattr(data, 'readlines'): data = getattr(data, method)() elif isinstance(data, list): if method == 'read': return ''.join(data) elif isinstance(data, basestring): data = getattr(open(data, mode), method)() ...
1,058,919
Prepare various input types for CSV parsing. Args: data (iter): Data to read field_names (tuple of str): Ordered names to assign to fields Returns: csv.DictReader: CSV reader suitable for parsing Raises: TypeError: Invalid value for data
def prepare_csv_read(data, field_names, *args, **kwargs): if hasattr(data, 'readlines') or isinstance(data, list): pass elif isinstance(data, basestring): data = open(data) else: raise TypeError('Unable to handle data of type %r' % type(data)) return csv.DictReader(data, fie...
1,058,920
Prepare various input types for XML parsing. Args: data (iter): Data to read objectify (bool): Parse using lxml's objectify data binding Returns: etree.ElementTree: Tree suitable for parsing Raises: TypeError: Invalid value for data
def prepare_xml_read(data, objectify=False): mod = _objectify if objectify else etree if hasattr(data, 'readlines'): data = mod.parse(data).getroot() elif isinstance(data, list): data = mod.fromstring(''.join(data)) elif isinstance(data, basestring): data = mod.parse(open(da...
1,058,921
Create a simple namespace-aware objectify element creator. Args: namespace (str): Namespace to work in Returns: function: Namespace-aware element creator
def element_creator(namespace=None): ELEMENT_MAKER = _objectify.ElementMaker(namespace=namespace, annotate=False) def create_elem(tag, attr=None, text=None): if not attr: attr = {} if text: element = getattr(ELEME...
1,058,922