docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Initialize wildcard route. Arguments: pattern (str): Pattern associated with the route. callback (callable): Route handler.
def __init__(self, pattern, callback): self._re = [] self._wildcards = [] for token in WildcardRoute.tokens(pattern): if token and token.startswith('<') and token.endswith('>'): w = Wildcard(token) self._wildcards.append(w) sel...
1,048,530
Return route handler with arguments if path matches this route. Arguments: path (str): Request path Returns: tuple or None: A tuple of three items: 1. Route handler (callable) 2. Positional arguments (list) 3. Keyword arguments (dict) ...
def match(self, path): match = self._re.search(path) if match is None: return None args = [] kwargs = {} for i, wildcard in enumerate(self._wildcards): if wildcard.name == '!': continue value = wildcard.value(match.grou...
1,048,531
Initialize wildcard definition. Arguments: spec (str): An angle-bracket delimited wildcard specification.
def __init__(self, spec): # Split '<foo:int>' into ['foo', 'int'] tokens = spec[1:-1].split(':', 1) if len(tokens) == 1: # Split '<foo>' into ['foo', ''] tokens.append('') self.name, self._type = tokens if not self._type: self._type = 'str' ...
1,048,532
Initialize regular expression route. Arguments: pattern (str): Pattern associated with the route. callback (callable): Route handler.
def __init__(self, pattern, callback): self._re = re.compile(pattern) self._callback = callback
1,048,533
Return route handler with arguments if path matches this route. Arguments: path (str): Request path Returns: tuple or None: A tuple of three items: 1. Route handler (callable) 2. Positional arguments (list) 3. Keyword arguments (dict) ...
def match(self, path): match = self._re.search(path) if match is None: return None kwargs_indexes = match.re.groupindex.values() args_indexes = [i for i in range(1, match.re.groups + 1) if i not in kwargs_indexes] args = [match.group...
1,048,534
Initialize the current request object. Arguments: environ (dict): Dictionary of environment variables.
def __init__(self, environ): self.environ = environ self.method = environ.get('REQUEST_METHOD', 'GET') self.path = environ.get('PATH_INFO', '/') if not self.path: self.path = '/' self.query = MultiDict() self.form = MultiDict() self.cookies = ...
1,048,535
Initialize the current response object. Arguments: start_response_callable (callable): Callable that starts response.
def __init__(self, start_response_callable): self.start = start_response_callable self.status = 200 self.media_type = 'text/html' self.charset = 'UTF-8' self._headers = [] self.body = None self.state = {}
1,048,536
Add an HTTP header to response object. Arguments: name (str): HTTP header field name value (str): HTTP header field value
def add_header(self, name, value): if value is not None: self._headers.append((name, value))
1,048,538
Add a Set-Cookie header to response object. For a description about cookie attribute values, see https://docs.python.org/3/library/http.cookies.html#http.cookies.Morsel. Arguments: name (str): Name of the cookie value (str): Value of the cookie attrs (dict): Dicit...
def set_cookie(self, name, value, attrs={}): cookie = http.cookies.SimpleCookie() cookie[name] = value for key, value in attrs.items(): cookie[name][key] = value self.add_header('Set-Cookie', cookie[name].OutputString())
1,048,539
Adds value to the list of values for the specified key. Arguments: key (object): Key value (object): Value
def __setitem__(self, key, value): if key not in self.data: self.data[key] = [value] else: self.data[key].append(value)
1,048,542
Return the list of all values for the specified key. Arguments: key (object): Key default (list): Default value to return if the key does not exist, defaults to ``[]``, i.e. an empty list. Returns: list: List of all values for the specified key if the key ...
def getall(self, key, default=[]): return self.data[key] if key in self.data else default
1,048,543
Split a line on the closest space, or break the last word with '-'. Args: what(str): text to spli one line of. indent(str): will prepend this indent to the split line, taking it into account in the column count. cols(int): maximum length of the split line. Returns: tupl...
def split_line(what, indent='', cols=79): if len(indent) > cols: raise ValueError("The indent can't be longer than cols.") if cols < 2: raise ValueError( "The cols can't be smaller than 2 (a char plus a possible '-')" ) what = indent + what.lstrip() if len(wha...
1,048,708
Wrap the given text to the columns, prepending the indent to each line. Args: what(str): text to wrap. indent(str): indentation to use. cols(int): colt to wrap to. Returns: str: Wrapped text
def fit_to_cols(what, indent='', cols=79): lines = [] while what: what, next_line = split_line( what=what, cols=cols, indent=indent, ) lines.append(next_line) return '\n'.join(lines)
1,048,709
Create the Trackr API interface object. Args: email (str): Trackr account email address. password (str): Trackrr account password.
def __init__(self, email, password): self.email = email self.password = password self.token = None self.last_api_call = None self.state = [] # get a token self.authenticate() # get the latest state from the API self.update_state_from_api()
1,048,867
Genreate a Band object given band metadata Args: band (dict): dictionary containing metadata for a given band Return: Band : the loaded Band onject
def GenerateBand(self, band, meta_only=False, cast=False): # Read the band data and add it to dictionary if not meta_only: fname = band.get('file_name') data = self.ReadTif('%s/%s' % (os.path.dirname(self.filename), fname)) # band['data'] = data # TODO: data...
1,049,866
Turn the running process into a proper daemon according to PEP3143. Args: pidfile --The pidfile to create.
def daemonize(pidfile=None): # Prevent core dumps resource.setrlimit(resource.RLIMIT_CORE, (0, 0)) # Change working directory os.chdir("/") # Change file creation mask os.umask(0) # Detach process context: do double fork pid = os.fork() if pid > 0: os._exit(0) os...
1,050,257
Parse the value of a config option and convert it to a dictionary. The configuration allows lines formatted like: foo = Bar:1,Baz,Flub:0.75 This gets converted to a dictionary: foo = { 'Bar': 1, 'Baz': 0, 'Flub': 0.75 } Args: option_value -- The config string to parse.
def config_str2dict(option_value): dict = {} for key in option_value.split(','): if ':' in key: key, value = pair.split(':') value = float(value) else: value = 0 dict[key] = value return dict
1,050,258
Write a line of data to the server. Args: line -- A single line of data to write to the socket.
def _send(self, line): if not line.endswith('\r\n'): if line.endswith('\n'): logger.debug('Fixing bare LF before sending data to socket') line = line[0:-1] + '\r\n' else: logger.debug( 'Fixing missing CRLF befor...
1,050,294
Peek at the data in the server response. Peeking should only be done when the response can be predicted. Make sure that the socket will not block by requesting too much data from it while peeking. Args: chars -- the number of characters to peek.
def _peek(self, chars=1): line = self._socket.recv(chars, socket.MSG_PEEK) logger.debug('Server sent (peek): ' + line.rstrip()) return line
1,050,296
Retrieves the release notes, from the RELEASE_NOTES file (if in a package) or generates it from the git history. Args: project_dir(str): Path to the git repo of the project. bugtracker_url(str): Url to the bug tracker for the issues. Returns: str: release notes Raises: ...
def get_releasenotes(project_dir=os.curdir, bugtracker_url=''): releasenotes = '' pkg_info_file = os.path.join(project_dir, 'PKG-INFO') releasenotes_file = os.path.join(project_dir, 'RELEASE_NOTES') if os.path.exists(pkg_info_file) and os.path.exists(releasenotes_file): with open(releasenot...
1,050,347
Creates the release notes file, if not in a package. Args: project_dir(str): Path to the git repo of the project. bugtracker_url(str): Url to the bug tracker for the issues. Returns: None Raises: RuntimeError: If the release notes could not be retrieved
def create_releasenotes(project_dir=os.curdir, bugtracker_url=''): pkg_info_file = os.path.join(project_dir, 'PKG-INFO') if os.path.exists(pkg_info_file): return with open('RELEASE_NOTES', 'wb') as releasenotes_fd: releasenotes_fd.write( get_releasenotes( pr...
1,050,350
Returns the set of fluents in the expression's scope. Args: expr: Expression object or nested tuple of Expressions. Returns: The set of fluents in the expression's scope.
def __get_scope(cls, expr: Union['Expression', Tuple]) -> Set[str]: scope = set() for i, atom in enumerate(expr): if isinstance(atom, Expression): scope.update(cls.__get_scope(atom._expr)) elif type(atom) in [tuple, list]: scop...
1,050,623
Copies a record and its fields, recurses for any field that is a Record. For records that have nested mutable fields, use copy.deepcopy. Args: record: A Record instance to be copied. **field_overrides: Fields and their values to override in the new copy. Returns: A copy of the given r...
def CopyRecord(record, **field_overrides): fields = field_overrides for field in record.__slots__: if field in field_overrides: continue value = getattr(record, field) if isinstance(value, RecordClass): # Recurse for records. new_value = CopyReco...
1,050,942
Given a repo will add a tag for each major version. Args: repo_path(str): path to the git repository to tag.
def tag_versions(repo_path): repo = dulwich.repo.Repo(repo_path) tags = get_tags(repo) maj_version = 0 feat_version = 0 fix_version = 0 last_maj_version = 0 last_feat_version = 0 result = [] for commit_sha, children in reversed( get_children_per_first_parent(repo_pa...
1,051,588
Initialise data object Arguments: o multiplicon_file - location of iADHoRe multiplicon.txt o segment_file - location of iADHoRe segment.txt file o db_filename - location to write SQLite3 database (defaults to in-memory)
def __init__(self, multiplicon_file=None, segment_file=None, db_filename=":memory:"): # Attributes later populated in methods self._dbconn = None self._redundant_multiplicon_cache = None # Get arguments and initialise self._multiplicon_file = multiplicon...
1,051,645
Return a generator of the IDs of multiplicons found at leaves of the tree (i.e. from which no further multiplicons were derived). Arguments: o redundant - if true, report redundant multiplicons
def get_multiplicon_leaves(self, redundant=False): for node in self._multiplicon_graph.nodes(): if not len(self._multiplicon_graph.out_edges(node)): if not self.is_redundant_multiplicon(node): yield node elif redundant: ...
1,051,649
Return a generator of the IDs of multiplicons that are initial seeding 'pairs' in level 2 multiplicons. Arguments: o redundant - if true, report redundant multiplicons
def get_multiplicon_seeds(self, redundant=False): for node in self._multiplicon_graph.nodes(): if not len(self._multiplicon_graph.in_edges(node)): if not self.is_redundant_multiplicon(node): yield node elif redundant: y...
1,051,650
Return a generator of the IDs of multiplicons that are neither seeding 'pairs' in level 2 multiplicons, nor leaves. Arguments: o redundant - if true, report redundant multiplicons
def get_multiplicon_intermediates(self, redundant=False): for node in self._multiplicon_graph.nodes(): if len(self._multiplicon_graph.in_edges(node)) and \ len(self._multiplicon_graph.out_edges(node)): if not self.is_redundant_multiplicon(node): ...
1,051,651
Get a manifest file, parse and store it. Args: webfont_name (string): Webfont key name. Used to store manifest and potentially its parser error. webfont_settings (dict): Webfont settings (an item value from ``settings.ICOMOON_WEBFONTS``).
def get(self, webfont_name, webfont_settings): try: webfont_settings = extend_webfont_settings(webfont_settings) except IcomoonSettingsError as e: msg = "Invalid webfont settings for '{}': {}" self.errors[webfont_name] = msg.format(webfont_name, e.value) ...
1,051,874
Store every defined webfonts. Webfont are stored with sort on their name. Args: webfonts (dict): Dictionnary of webfont settings from ``settings.ICOMOON_WEBFONTS``.
def fetch(self, webfonts): sorted_keys = sorted(webfonts.keys()) for webfont_name in sorted_keys: self.get(webfont_name, webfonts[webfont_name])
1,051,875
Get an update from the specified service. Arguments: name (:py:class:`str`): The name of the service. service_map (:py:class:`dict`): A mapping of service names to :py:class:`flash.service.core.Service` instances. Returns: :py:class:`dict`: The updated data.
def update_service(name, service_map): if name in service_map: service = service_map[name] data = service.update() if not data: logger.warning('no data received for service: %s', name) else: data['service_name'] = service.service_name CACHE[na...
1,051,917
And a friendly update time to the supplied data. Arguments: data (:py:class:`dict`): The response data and its update time. Returns: :py:class:`dict`: The data with a friendly update time.
def add_time(data): payload = data['data'] updated = data['updated'].date() if updated == date.today(): payload['last_updated'] = data['updated'].strftime('today at %H:%M:%S') elif updated >= (date.today() - timedelta(days=1)): payload['last_updated'] = 'yesterday' elif updated ...
1,051,918
Initialize the BabelfyClient. Arguments: api_key -- key to connect the babelfy api Keyword arguments: params -- params for the api request
def __init__(self, api_key, params=None): self._api_key = api_key self._params = params or dict() self._data = list() self._entities = list() self._all_entities = list() self._merged_entities = list() self._all_merged_entities = list() self._text ...
1,051,961
Run PCA and calculate the cumulative fraction of variance Args: Y: phenotype values standardize: if True, phenotypes are standardized Returns: var: cumulative distribution of variance explained
def PC_varExplained(Y,standardized=True): # figuring out the number of latent factors if standardized: Y-=Y.mean(0) Y/=Y.std(0) covY = sp.cov(Y) S,U = linalg.eigh(covY+1e-6*sp.eye(covY.shape[0])) S = S[::-1] rv = np.array([S[0:i].sum() for i in range(1,S.shape[0])]) rv/=...
1,052,114
split into windows using a slide criterion Args: size: window size step: moving step (default: 0.5*size) Returns: wnd_i: number of windows nSnps: vector of per-window number of SNPs
def splitGenoSlidingWindow(pos,out_file,size=5e4,step=None): if step is None: step = 0.5*size chroms = SP.unique(pos[:,0]) RV = [] wnd_i = 0 wnd_file = csv.writer(open(out_file,'w'),delimiter='\t') nSnps = [] for chrom_i in chroms: Ichrom = pos[:,0]==chrom_i idx_chr...
1,052,132
add fixed effect term to the model Args: F: sample design matrix for the fixed effect [N,K] A: trait design matrix for the fixed effect (e.g. sp.ones((1,P)) common effect; sp.eye(P) any effect) [L,P] Ftest: sample design matrix for test samples [Ntest,K]
def addFixedEffect(self, F=None, A=None, Ftest=None): if A is None: A = sp.eye(self.P) if F is None: F = sp.ones((self.N,1)) if self.Ntest is not None: Ftest = sp.ones((self.Ntest,1)) assert A.shape[1]==self.P, 'VarianceDecomposition:...
1,052,138
Return weights for fixed effect term term_i Args: term_i: fixed effect term index Returns: weights of the spefied fixed effect term. The output will be a KxL matrix of weights will be returned, where K is F.shape[1] and L is A.shape[1] of the correspo...
def getWeights(self, term_i=None): assert self.init, 'GP not initialised' if term_i==None: if self.gp.mean.n_terms==1: term_i = 0 else: print('VarianceDecomposition: Specify fixed effect term index') return self.gp.mean.B[term_i]
1,052,140
Return the estimated trait covariance matrix for term_i (or the total if term_i is None) To retrieve the matrix of correlation coefficient use \see getTraitCorrCoef Args: term_i: index of the random effect term we want to retrieve the covariance matrix Returns: e...
def getTraitCovar(self, term_i=None): assert term_i < self.n_randEffs, 'VarianceDecomposition:: specied term out of range' if term_i is None: RV = sp.zeros((self.P,self.P)) for term_i in range(self.n_randEffs): RV += self.getTraitCovarFun().K() e...
1,052,141
Return the estimated trait correlation coefficient matrix for term_i (or the total if term_i is None) To retrieve the trait covariance matrix use \see getTraitCovar Args: term_i: index of the random effect term we want to retrieve the correlation coefficients Returns: ...
def getTraitCorrCoef(self,term_i=None): cov = self.getTraitCovar(term_i) stds = sp.sqrt(cov.diagonal())[:,sp.newaxis] RV = cov / stds / stds.T return RV
1,052,142
Return the estimated variance components Args: univariance: Boolean indicator, if True variance components are normalized to sum up to 1 for each trait Returns: variance components of all random effects on all phenotypes [P, n_randEffs matrix]
def getVarianceComps(self, univariance=False): RV=sp.zeros((self.P,self.n_randEffs)) for term_i in range(self.n_randEffs): RV[:,term_i] = self.getTraitCovar(term_i).diagonal() if univariance: RV /= RV.sum(1)[:,sp.newaxis] return RV
1,052,143
Returns standard errors on trait covariances from term_i (for the covariance estimate \see getTraitCovar) Args: term_i: index of the term we are interested in
def getTraitCovarStdErrors(self,term_i): assert self.init, 'GP not initialised' assert self.fast==False, 'Not supported for fast implementation' if self.P==1: out = (2*self.getScales()[term_i])**2*self._getLaplaceCovar()[term_i,term_i] else: C = s...
1,052,150
Return the standard errors on the estimated variance components (for variance component estimates \see getVarianceComps) Args: univariance: Boolean indicator, if True variance components are normalized to sum up to 1 for each trait Returns: standard errors on variance componen...
def getVarianceCompStdErrors(self,univariance=False): RV=sp.zeros((self.P,self.n_randEffs)) for term_i in range(self.n_randEffs): #RV[:,term_i] = self.getTraitCovarStdErrors(term_i).diagonal() RV[:,term_i] = self.getTraitCovarStdErrors(term_i) var = self.getVaria...
1,052,151
predict the conditional mean (BLUP) Args: use_fixed: list of fixed effect indeces to use for predictions use_random: list of random effect indeces to use for predictions Returns: predictions (BLUP)
def predictPhenos(self,use_fixed=None,use_random=None): assert self.noisPos is not None, 'No noise element' assert self.init, 'GP not initialised' assert self.Ntest is not None, 'VarianceDecomposition:: specify Ntest for predictions (method VarianceDecompositio...
1,052,152
Internal function for parameter initialization estimate variance components and fixed effect using a linear mixed model with an intercept and 2 random effects (one is noise) Args: K: covariance matrix of the non-noise random effect term
def _getH2singleTrait(self, K, verbose=None): verbose = dlimix.getVerbose(verbose) # Fit single trait model varg = sp.zeros(self.P) varn = sp.zeros(self.P) fixed = sp.zeros((1,self.P)) for p in range(self.P): y = self.Y[:,p:p+1] # check...
1,052,154
Internal function for parameter initialization Uses 2 term single trait model to get covar params for initialization Args: termx: non-noise term terms that is used for initialization
def _getScalesDiag(self,termx=0): assert self.P>1, 'VarianceDecomposition:: diagonal init_method allowed only for multi trait models' assert self.noisPos is not None, 'VarianceDecomposition:: noise term has to be set' assert termx<self.n_randEffs-1, 'VarianceDecomposition:: termx>=n_ran...
1,052,155
Correctly joins tokens to multiple sentences Instead of always placing white-space between the tokens, it will distinguish between the next symbol and *not* insert whitespace if it is a sentence symbol (e.g. '.', or '?') Args: tokens: array of string tokens Returns: Joint sentences...
def join_tokens_to_sentences(tokens): text = "" for (entry, next_entry) in zip(tokens, tokens[1:]): text += entry if next_entry not in SENTENCE_STOPS: text += " " text += tokens[-1] return text
1,052,165
Returns a sorted list of prefixes. Args: orig (str): Unsorted list of prefixes. prefixes (str): List of prefixes, from highest-priv to lowest.
def sort_prefixes(orig, prefixes='@+'): new = '' for prefix in prefixes: if prefix in orig: new += prefix return new
1,052,216
Return a modelist. Args: params (list of str): Parameters from MODE event. mode_types (list): CHANMODES-like mode types. prefixes (str): PREFIX-like mode types.
def parse_modes(params, mode_types=None, prefixes=''): # we don't accept bare strings because we don't want to try to do # intelligent parameter splitting params = list(params) if params[0][0] not in '+-': raise Exception('first param must start with + or -') if mode_types is None: ...
1,052,217
Initialize auth method with existing credentials. Args: credentials: OAuth2 credentials obtained via GAP OAuth2 library.
def __init__(self, credentials): if not has_httplib2: raise ImportError("No module named httplib2") super(GAPDecoratorAuthMethod, self).__init__() self._http = None self._credentials = credentials self._action_token = None
1,052,317
A Generic parser for arbitrary tags in a node. Parameters: - node: A node in the DOM. - pad: `int` (default: 0) If 0 the node data is not padded with newlines. If 1 it appends a newline after parsing the childNodes. If 2 it pads before and after the nodes...
def generic_parse(self, node, pad=0): npiece = 0 if pad: npiece = len(self.pieces) if pad == 2: self.add_text('\n') for n in node.childNodes: self.parse(n) if pad: if len(self.pieces) > npiece: self....
1,052,326
Scp a remote file to local Args: remote_path (str) local_path (str)
def scp_file_remote_to_local(self, remote_path, local_path): scp_command = [ 'scp', '-o', 'StrictHostKeyChecking=no', '-i', self.browser_config.get('ssh_key_path'), '%s@%s:"%s"' % ( self.browser_config....
1,053,052
Execute a command on the node Args: command (str)
def execute_command(self, command): self.info_log("executing command: %s" % command) try: ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) k = paramiko.RSAKey.from_private_key_file( self.browser_config.ge...
1,053,053
Load json or yaml data from file handle. Args: fh (file): File handle to load from. Examlple: >>> with open('data.json', 'r') as json: >>> jsdata = composite.load(json) >>> >>> with open('data.yml', 'r') as yml: >>> ymldata ...
def load(cls, fh): dat = fh.read() try: ret = cls.from_json(dat) except: ret = cls.from_yaml(dat) return ret
1,053,059
Load json from file handle. Args: fh (file): File handle to load from. Examlple: >>> with open('data.json', 'r') as json: >>> data = composite.load(json)
def from_json(cls, fh): if isinstance(fh, str): return cls(json.loads(fh)) else: return cls(json.load(fh))
1,053,060
Recursively compute intersection of data. For dictionaries, items for specific keys will be reduced to unique items. For lists, items will be reduced to unique items. This method is meant to be analogous to set.intersection for composite objects. Args: other (composite): Oth...
def intersection(self, other, recursive=True): if not isinstance(other, composite): raise AssertionError('Cannot intersect composite and {} types'.format(type(other))) if self.meta_type != other.meta_type: return composite({}) if self.meta_type == 'list...
1,053,070
Write composite object to file handle in JSON format. Args: fh (file): File handle to write to. pretty (bool): Sort keys and indent in output.
def write_json(self, fh, pretty=True): sjson = json.JSONEncoder().encode(self.json()) if pretty: json.dump(json.loads(sjson), fh, sort_keys=True, indent=4) else: json.dump(json.loads(sjson), fh) return
1,053,078
Prune leaves of filetree according to specified regular expression. Args: regex (str): Regular expression to use in pruning tree.
def prune(self, regex=r".*"): return filetree(self.root, ignore=self.ignore, regex=regex)
1,053,086
Initialize the Micropub extension if it was not given app in the constructor. Args: app (flask.Flask): the flask application to extend. client_id (string, optional): the IndieAuth client id, will be displayed when the user is asked to authorize this client. If not ...
def init_app(self, app, client_id=None): if not self.client_id: if client_id: self.client_id = client_id else: self.client_id = app.name
1,053,426
Add a field in the index of the model. Args: fieldname (Text): This parameters register a new field in specified model. fieldspec (Name, optional): This option adds various options as were described before. Returns: TYPE: The new schema after deleted is returned.
def add_field(self, fieldname, fieldspec=whoosh_module_fields.TEXT): self._whoosh.add_field(fieldname, fieldspec) return self._whoosh.schema
1,053,504
This function deletes one determined field using the command MODEL.pw.delete_field(FIELD) Args: field_name (string): This argument let you delete some field for some model registered in the index. Returns: (WhooshSchema): The new schema after deleted is returned.
def delete_field(self, field_name): self._whoosh.remove_field(field_name.strip()) return self._whoosh.schema
1,053,505
Prepares search string as a proper whoosh search string. Args: search_string (str): it prepares the search string and see if the lenght is correct. Optional Args: add_wildcards (bool): It runs a query for inexact queries. Raises: ValueError: When the search string does not have ...
def prep_search_string(self, search_string, add_wildcards=False): s = search_string.strip() try: s = str(s) except: pass s = s.replace('*', '') if len(s) < self._pw.search_string_min_len: raise ValueError('Search string must have at least {} characters' .format(self....
1,053,509
Convert MCS index to rate in Mbps. See http://mcsindex.com/ Args: mcs (int): MCS index bw (int): bandwidth, 20, 40, 80, ... long_gi(bool): True if long GI is used. Returns: rate (float): bitrate in Mbps >>> mcs_to_rate(5, bw=20, long_gi=False) 57.8 >>> mcs_t...
def mcs_to_rate(mcs, bw=20, long_gi=True): if bw not in [20, 40, 80, 160]: raise Exception("Unknown bandwidth: %d MHz" % (bw)) if mcs not in MCS_TABLE: raise Exception("Unknown MCS: %d" % (mcs)) idx = int((math.log(bw/10, 2)-1)*2) if not long_gi: idx += 1 return MCS_TAB...
1,053,621
Convert bit rate to MCS index. Args: rate (float): bit rate in Mbps bw (int): bandwidth, 20, 40, 80, ... long_gi (bool): True if long GI is used. Returns: mcs (int): MCS index >>> rate_to_mcs(120, bw=40, long_gi=False) 5
def rate_to_mcs(rate, bw=20, long_gi=True): if bw not in [20, 40, 80, 160]: raise Exception("Unknown bandwidth: %d MHz" % (bw)) idx = int((math.log(bw/10, 2)-1)*2) if not long_gi: idx += 1 for mcs, rates in MCS_TABLE.items(): if abs(rates[idx] - rate) < 1e-3: re...
1,053,622
After each :py:meth:`predict`, this method may be called repeatedly to provide additional measurements for each time step. Args: measurement (MultivariateNormal): Measurement for this time step with specified mean and covariance. measurement_matrix (array): Measu...
def update(self, measurement, measurement_matrix): # Sanitise input arguments measurement_matrix = np.atleast_2d(measurement_matrix) expected_meas_mat_shape = (measurement.mean.shape[0], self.state_length) if measurement_matrix.shape != expected_meas_mat_shape: raise...
1,053,711
Truncate the filter as if only *new_count* :py:meth:`.predict`, :py:meth:`.update` steps had been performed. If *new_count* is greater than :py:attr:`.state_count` then this function is a no-op. Measurements, state estimates, process matrices and process noises which are truncated are d...
def truncate(self, new_count): self.posterior_state_estimates = self.posterior_state_estimates[:new_count] self.prior_state_estimates = self.prior_state_estimates[:new_count] self.measurements = self.measurements[:new_count] self.process_matrices = self.process_matrices[:new_cou...
1,053,712
Scrape a twitter archive csv, yielding tweet text. Args: directory (str): CSV file or (directory containing tweets.csv). field (str): Field with the tweet's text (default: text). fieldnames (list): The column names for a csv with no header. Must contain <field>. ...
def read_csv(directory): if path.isdir(directory): csvfile = path.join(directory, 'tweets.csv') else: csvfile = directory with open(csvfile, 'r') as f: for tweet in csv.DictReader(f): try: tweet['text'] = tweet['text'].decode('utf-8') exc...
1,053,738
A quick regular expression check to see that the input is sane Args: equation_str (str): String of equation to be parsed by sympify function. Expected to be valid Python. Raises: BadInputError: If input does not look safe to parse as an equation.
def regex_check(equation_str): match1 = re.match( r'^(([xy+\-*/()0-9. ]+|sin\(|cos\(|exp\(|log\()?)+$', equation_str ) match2 = re.match(r'^.*([xy]) *([xy]).*$', equation_str) if match1 and not match2: return True raise BadInputError('Cannot parse entered equation')
1,053,894
Write the data out as base64 binary Args: output (file-like object): Output to write figure to.
def write_data(self, output): if self.figure: canvas = FigureCanvas(self.figure) self.figure.savefig(output, format='png', bbox_inches='tight') output.seek(0) return output.getvalue() return None
1,053,899
Return section of the config for a specific context (sub-command). Parameters: ctx (Context): The Click context object. optional (bool): If ``True``, return an empty config object when section is missing. Returns: Section: The configuration secti...
def section(self, ctx, optional=False): values = self.load() try: return values[ctx.info_name] except KeyError: if optional: return configobj.ConfigObj({}, **self.DEFAULT_CONFIG_OPTS) raise LoggedFailure("Configuration section '{}' not...
1,054,042
Return the specified name from the root section. Parameters: name (str): The name of the requested value. default (optional): If set, the default value to use instead of raising :class:`LoggedFailure` for unknown names. Re...
def get(self, name, default=NO_DEFAULT): values = self.load() try: return values[name] except KeyError: if default is self.NO_DEFAULT: raise LoggedFailure("Configuration value '{}' not found in root section!".format(name)) return defau...
1,054,043
Import identifier ``name`` from module ``modulename``. If ``name`` is omitted, ``modulename`` must contain the name after the module path, delimited by a colon. Parameters: modulename (str): Fully qualified module name, e.g. ``x.y.z``. name (str): Name to import from ``...
def import_name(modulename, name=None): if name is None: modulename, name = modulename.rsplit(':', 1) module = __import__(modulename, globals(), {}, [name]) return getattr(module, name)
1,054,150
Load a Python module from a path under a specified name. Parameters: modulename (str): Fully qualified module name, e.g. ``x.y.z``. modulepath (str): Filename of the module. Returns: Loaded module.
def load_module(modulename, modulepath): if '.' in modulename: modulepackage, modulebase = modulename.rsplit('.', 1) else: modulepackage = '' imp.acquire_lock() try: # Check if module is already loaded if modulename not in sys.modules: # Find module on d...
1,054,151
Kill process by pid Args: pid (int)
def kill_pid(self, pid): try: p = psutil.Process(pid) p.terminate() self.info_log('Killed [pid:%s][name:%s]' % (p.pid, p.name())) except psutil.NoSuchProcess: self.error_log('No such process: [pid:%s]' % pid)
1,054,188
Kill by process name Args: procname (str)
def kill(self, procname): for proc in psutil.process_iter(): if proc.name() == procname: self.info_log( '[pid:%s][name:%s] killed' % (proc.pid, proc.name()) ) proc.kill()
1,054,189
Print test summary When the test batch is finished a test summary will be printed Args: executed_tests (list)
def print_test_summary(self, executed_tests): separator = '---------------------' with DbSessionContext(BROME_CONFIG['database']['mongo_database_name']) as session: # noqa test_batch = session.query(Testbatch).filter(Testbatch.mongo_id == self.test_batch_id).one() # noqa ...
1,054,193
Navigate to a specific url This specific implementation inject a javascript script to intercept the javascript error Configurable with the "proxy_driver:intercept_javascript_error" config Args: url (str): the url to navigate to Returns: bool
def get(self, url): self._driver.get(url) if self.bot_diary: self.bot_diary.add_auto_entry( "I went on", target=url, take_screenshot=True ) if BROME_CONFIG['proxy_driver']['intercept_javascript_error']: ...
1,054,268
Return the gathered javascript error Args: return_type: 'string' | 'list'; default: 'string'
def get_javascript_error(self, return_type='string'): if BROME_CONFIG['proxy_driver']['intercept_javascript_error']: js_errors = self._driver.execute_script( 'return window.jsErrors; window.jsErrors = [];' ) if not js_errors: js_erro...
1,054,270
Drag and drop Args: source_selector: (str) destination_selector: (str) Kwargs: use_javascript_dnd: bool; default: config proxy_driver:use_javascript_dnd
def drag_and_drop(self, source_selector, destination_selector, **kwargs): self.info_log( "Drag and drop: source (%s); destination (%s)" % (source_selector, destination_selector) ) use_javascript_dnd = kwargs.get( "use_javascript_dnd", "pr...
1,054,272
Take a screenshot of a node Args: element (object): the proxy_element screenshot_path (str): the path where the screenshot will be saved
def take_node_screenshot(self, element, screenshot_path): from PIL import Image temp_path = os.path.join(tempdir, screenshot_path) el_x = int(element.location['x']) el_y = int(element.location['y']) el_height = int(element.size['height']) el_width = int(element...
1,054,274
Take a quality screenshot Use the screenshot_name args when you want to take a screenshot for reference Args: screenshot_name (str) the name of the screenshot
def take_quality_screenshot(self, screenshot_name): self.info_log("Taking a quality screenshot...") if self.test_instance._runner_dir: _screenshot_name = '%s.png' % string_to_filename(screenshot_name) relative_path = os.path.join( self.test_instance....
1,054,276
Assert that the element is present in the dom Args: selector (str): the selector used to find the element test_id (str): the test_id or a str Kwargs: wait_until_present (bool) Returns: bool: True if the assertion succeeds; False otherwise.
def assert_present(self, selector, testid=None, **kwargs): self.info_log( "Assert present selector(%s) testid(%s)" % (selector, testid) ) wait_until_present = kwargs.get( 'wait_until_present', BROME_CONFIG['proxy_driver']['wait_until_present_before_a...
1,054,277
Assert that the element is not present in the dom Args: selector (str): the selector used to find the element test_id (str): the test_id or a str Kwargs: wait_until_not_present (bool) Returns: bool: True is the assertion succeed; False otherwise...
def assert_not_present(self, selector, testid=None, **kwargs): self.info_log( "Assert not present selector(%s) testid(%s)" % (selector, testid) ) wait_until_not_present = kwargs.get( 'wait_until_not_present', BROME_CONFIG['proxy_driver'][...
1,054,278
Assert that the element is visible in the dom Args: selector (str): the selector used to find the element testid (str): the test_id or a str Kwargs: wait_until_visible (bool) highlight (bool) Returns: bool: True is the assertion succ...
def assert_visible(self, selector, testid=None, **kwargs): self.info_log( "Assert visible selector(%s) testid(%s)" % (selector, testid) ) highlight = kwargs.get( 'highlight', BROME_CONFIG['highlight']['highlight_on_assertion_success'] ) ...
1,054,279
Assert that the element is not visible in the dom Args: selector (str): the selector used to find the element test_id (str): the test_id or a str Kwargs: wait_until_not_visible (bool) highlight (bool) Returns: bool: True is the asser...
def assert_not_visible(self, selector, testid=None, **kwargs): self.info_log( "Assert not visible selector(%s) testid(%s)" % (selector, testid) ) highlight = kwargs.get( 'highlight', BROME_CONFIG['highlight']['highlight_on_assertion_failure'] ...
1,054,280
Assert that the element's text is equal to the provided value Args: selector (str): the selector used to find the element value (str): the value that will be compare with the element.text value test_id (str): the test_id or a str Kwargs: ...
def assert_text_equal(self, selector, value, testid=None, **kwargs): self.info_log( "Assert text equal selector(%s) testid(%s)" % (selector, testid) ) highlight = kwargs.get( 'highlight', BROME_CONFIG['highlight']['highlight_on_assertion_success'] ...
1,054,281
Create a test result entry in the persistence layer Args: testid (str) result (bool) Keyword Args: extra_data (dict): the extra data that will be saved with the test result Returns: None
def create_test_result(self, testid, result, **kwargs): embed = True videocapture_path = self.test_instance._video_capture_file_relative_path # noqa screenshot_relative_path = '' extra_data = {} # JAVASCRIPT ERROR if not result: extra_data['javascr...
1,054,282
Return an iterator tweets from users in these locations. See https://dev.twitter.com/streaming/overview/request-parameters#locations Params: locations...list of bounding box locations of the form: southwest_longitude, southwest_latitude, northeast_longitude, northeast_latitude, ...
def track_locations(locations):
    """Return an iterator of tweets from users in these locations.

    See https://dev.twitter.com/streaming/overview/request-parameters#locations

    Args:
        locations (list): flat list of bounding-box coordinates, four
            floats per box: southwest_longitude, southwest_latitude,
            northeast_longitude, northeast_latitude.

    Returns:
        Iterator over matching tweets from the streaming API.

    Raises:
        ValueError: if the list length is not a multiple of four.
    """
    # Each bounding box needs exactly four coordinates; validate before
    # hitting the API.  ValueError (not bare Exception) is the idiomatic
    # type for bad argument values and stays catchable by existing
    # `except Exception` callers.
    if len(locations) % 4 != 0:
        raise ValueError('length of bounding box list should be a multiple of four')
    results = twapi.request(
        'statuses/filter',
        {'locations': ','.join('%f' % coord for coord in locations)})
    return results.get_iterator()
1,054,305
Constructor Args: pos: position chrom: chromosome
def __init__(self,pos=None,chrom=None,separate_chroms=False): # assert assert pos is not None, 'Slider:: set pos' assert chrom is not None, 'Slider:: set chrom' self.pos = pos self.chrom = chrom # sep chroms self.separate_chroms = separate_chroms ...
1,054,686
Split into windows using a slide criterion Args: size: window size step: moving step (default: 0.5*size) minSnps: only windows with nSnps>=minSnps are considered maxSnps: only windows with nSnps<=maxSnps are considered
def _splitGenoSlidingWindow(self,size=5e4,step=None,minSnps=1.,maxSnps=SP.inf): if step is None: step = 0.5*size chroms = SP.unique(self.chrom) wnd_pos = [] idx_wnd_start = [] nSnps = [] wnd_i = 0 nSnps = [] for chrom_i in chrom...
1,054,688
Return the default args as a parent parser, optionally adding a version Args: version (str): version to return on <cli> --version include (Sequence): default arguments to add to cli. Default: (config, user, dry-run, verbose, quiet)
def parent(version=None, include=None):
    """Build a parent parser carrying the default CLI arguments.

    Args:
        version (str): version string reported by ``--version``.
        include (Sequence): default arguments to add to the cli; when
            None, the helper's own defaults apply
            (config, user, dry-run, verbose, quiet).

    Returns:
        argparse.ArgumentParser: parser created with ``add_help=False``,
        suitable for use as a ``parents=`` entry.
    """
    base = argparse.ArgumentParser(add_help=False)
    add_default_args(base, version=version, include=include)
    return base
1,054,699
Set up a stdout logger. Args: name (str): name of the logger level: defaults to logging.INFO format (str): format string for logging output. defaults to ``%(filename)-11s %(lineno)-3d: %(message)s``. Returns: The logger object.
def add_logger(name, level=None, format=None): format = format or '%(filename)-11s %(lineno)-3d: %(message)s' log = logging.getLogger(name) # Set logging level. log.setLevel(level or logging.INFO) ch = logging.StreamHandler(sys.stdout) ch.setFormatter(logging.Formatter(format)) log.ad...
1,054,700
Navigate an open ftplib.FTP to appropriate directory for ClinVar VCF files. Args: ftp: (type: ftplib.FTP) an open connection to ftp.ncbi.nlm.nih.gov build: (type: string) genome build, either 'b37' or 'b38'
def nav_to_vcf_dir(ftp, build):
    """Navigate an open ftplib.FTP to the ClinVar VCF directory.

    Args:
        ftp (ftplib.FTP): an open connection to ftp.ncbi.nlm.nih.gov.
        build (str): genome build, either 'b37' or 'b38'.

    Raises:
        IOError: if *build* is neither 'b37' nor 'b38'.
    """
    if build == 'b37':
        target_dir = DIR_CLINVAR_VCF_B37
    elif build == 'b38':
        target_dir = DIR_CLINVAR_VCF_B38
    else:
        raise IOError("Genome build not recognized.")
    ftp.cwd(target_dir)
1,054,729
Determine the filename for the most recent comprehensive ClinVar VCF. Args: build: (type: string) genome build, either 'b37' or 'b38' Returns: (type: string) Filename of the most recent comprehensive ClinVar VCF.
def latest_vcf_filename(build): ftp = FTP('ftp.ncbi.nlm.nih.gov') ftp.login() nav_to_vcf_dir(ftp, build=build) clinvar_datestamped = [f for f in ftp.nlst() if re.match('^clinvar_[0-9]{8}.vcf.gz$', f)] if len(clinvar_datestamped) == 1: return clinvar_datestampe...
1,054,730
Computes the transition probabilities of a corpus Args: corpus: the given corpus (a corpus_entry needs to be iterable) order: the maximal Markov chain order
def _compute_transitions(self, corpus, order=1): self.transitions = defaultdict(lambda: defaultdict(int)) for corpus_entry in corpus: tokens = self.tokenize(corpus_entry) last_tokens = utils.prefilled_buffer( self._start_symbol, length=self.order) ...
1,054,766
Returns true if a Status object has entities. Args: status: either a tweepy.Status object or a dict returned from Twitter API
def has_entities(status):
    """Return True if a Status object has any entities.

    Args:
        status: either a tweepy.Status object or a dict returned from
            the Twitter API.

    Returns:
        bool: True when at least one entity list is non-empty.
    """
    try:
        # tweepy.Status exposes entities as an attribute ...
        n_entities = sum(len(v) for v in status.entities.values())
    except AttributeError:
        # ... while the raw API payload is a plain dict.
        n_entities = sum(len(v) for v in status['entities'].values())
    return n_entities > 0
1,054,781
Replace shorturls in a status with expanded urls. Args: status (tweepy.status): A tweepy status object Returns: str
def replace_urls(status): text = status.text if not has_url(status): return text urls = [(e['indices'], e['expanded_url']) for e in status.entities['urls']] urls.sort(key=lambda x: x[0][0], reverse=True) for (start, end), url in urls: text = text[:start] + url + text[end:] ...
1,054,783
Create query from list of terms, using OR but intelligently excluding terms beginning with '-' (Twitter's NOT operator). Optionally add -from:exclude_screen_name. >>> helpers.queryize(['apple', 'orange', '-peach']) u'apple OR orange -peach' Args: terms (list): Search terms. exclude...
def queryize(terms, exclude_screen_name=None): ors = ' OR '.join('"{}"'.format(x) for x in terms if not x.startswith('-')) nots = ' '.join('-"{}"'.format(x[1:]) for x in terms if x.startswith('-')) sn = "-from:{}".format(exclude_screen_name) if exclude_screen_name else '' return ' '.join((ors, nots...
1,054,785
Shorten a string so that it fits under max_len, splitting it at 'split'. Not guaranteed to return a string under max_len, as it may not be possible. Args: text (str): String to shorten max_len (int): maximum length. default 280 split (str): strings to split on (default is common punctuat...
def chomp(text, max_len=280, split=None):
    """Shorten a string so that it fits under max_len, splitting it at 'split'.

    Not guaranteed to return a string under max_len, as it may not be
    possible to split any further.

    Args:
        text (str): String to shorten.
        max_len (int): maximum length (default 280, counted Twitter-style
            via the sibling ``length`` helper).
        split (str): characters to split on (default is common punctuation).

    Returns:
        str: the (possibly) shortened text.
    """
    split = split or '—;,.'
    pattern = r'[' + split + ']'
    while length(text) > max_len:
        # Reverse the string so the single-split happens at the LAST
        # delimiter, then reverse the kept part back.
        pieces = re.split(pattern, text[::-1], 1)
        if len(pieces) < 2:
            # No delimiter left to trim at; give up.
            return text
        text = pieces[1][::-1]
    return text
1,054,786
Count the length of a str the way Twitter does, double-counting "wide" characters (e.g. ideographs, emoji) Args: text (str): Text to count. Must be a unicode string in Python 2 maxval (int): The maximum encoding that will be counted as 1 character. Defaults to 4351 (ჿ GEORGIAN LETTE...
def length(text, maxval=None, encoding=None): maxval = maxval or 4351 try: assert not isinstance(text, six.binary_type) except AssertionError: raise TypeError('helpers.length requires a unicode argument') return sum(2 if ord(x) > maxval else 1 for x in unicodedata.normalize('NFC', t...
1,054,787
Execute a command on the node Args: command (str) Kwargs: username (str)
def execute_command(self, command, **kwargs): self.info_log("executing command: %s" % command) try: ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) username = kwargs.get( 'username', self...
1,054,907