docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Load a text file as an array of lines. Args: filename: Path to the input file. Returns: An array of strings, each representing an individual line.
def load_lines(filename):
    """Load a text file as a list of lines.

    Args:
        filename: Path to the input file (read as UTF-8).

    Returns:
        list: Strings, one per line, with trailing newlines removed.
    """
    with open(filename, 'r', encoding='utf-8') as f:
        # Iterate the file object directly instead of f.readlines():
        # same result, but avoids building an intermediate list first.
        return [line.rstrip('\n') for line in f]
869,103
Save an array of lines to a file. Args: lines: An array of strings that will be saved as individual lines. filename: Path to the output file.
def save_lines(lines, filename):
    """Save an array of lines to a file.

    Note: lines are newline-joined, so no trailing newline is written
    after the final line.

    Args:
        lines: An array of strings to save as individual lines.
        filename: Path to the output file (written as UTF-8).
    """
    joined = '\n'.join(lines)
    with open(filename, 'w', encoding='utf-8') as out:
        out.write(joined)
869,104
r""" Args: sourcecode (str): Returns: str: json formatted ipython notebook code cell CommandLine: python -m ibeis.templates.generate_notebook --exec-code_cell Example: >>> # DISABLE_DOCTEST >>> from ibeis.templates.generate_notebook import * # NOQA >>>...
def code_cell(sourcecode): r import utool as ut sourcecode = ut.remove_codeblock_syntax_sentinals(sourcecode) cell_header = ut.codeblock( ) cell_footer = ut.codeblock( ) if sourcecode is None: source_line_repr = ' []\n' else: lines = sourcecode.split('\n') ...
869,194
r""" Args: markdown (str): Returns: str: json formatted ipython notebook markdown cell CommandLine: python -m ibeis.templates.generate_notebook --exec-markdown_cell Example: >>> # DISABLE_DOCTEST >>> from ibeis.templates.generate_notebook import * # NOQA ...
def markdown_cell(markdown): r import utool as ut markdown_header = ut.codeblock( ) markdown_footer = ut.codeblock( ) return (markdown_header + '\n' + ut.indent(repr_single_for_md(markdown), ' ' * 2) + '\n' + markdown_footer)
869,195
r""" Args: s (str): Returns: str: str_repr CommandLine: python -m ibeis.templates.generate_notebook --exec-repr_single_for_md --show Example: >>> # DISABLE_DOCTEST >>> from ibeis.templates.generate_notebook import * # NOQA >>> s = '#HTML(\'<iframe src=...
def repr_single_for_md(s): r import utool as ut if True: str_repr = ut.reprfunc(s) import re if str_repr.startswith('\''): dq = (ut.DOUBLE_QUOTE) sq = (ut.SINGLE_QUOTE) bs = (ut.BACKSLASH) dq_, sq_, bs_ = list(map(re.escape, [dq, sq, bs...
869,197
Check if all required services are provided Args: services: List with the service names which are required Returns: List with missing services
def get_missing_services(self, services):
    """Check if all required services are provided.

    Args:
        services: List with the service names which are required.

    Returns:
        List with missing services, sorted alphabetically.
    """
    available = set(self._services)
    # Keep only the required names not present among the provided ones.
    return sorted(name for name in set(services) if name not in available)
869,291
Description: Calculates the Mean Squared Error (MSE) of an estimation on flat numpy ndarrays. Parameters: mean: actual value (numpy ndarray) estimator: estimated value of the mean (numpy ndarray)
def mse(mean, estimator):
    """
    Description:
        Calculates the Mean Squared Error (MSE) of an estimation on
        flat numpy ndarrays.
    Parameters:
        mean:      actual value (numpy ndarray)
        estimator: estimated value of the mean (numpy ndarray)
    """
    diff = np.asarray(estimator) - np.asarray(mean)
    return np.mean(diff * diff, axis=0)
869,382
Description: Calculates the Sum of Squared Errors (SSE) of an estimation on flat numpy ndarrays. Parameters: mean: actual value (numpy ndarray) estimator: estimated value of the mean (numpy ndarray)
def sse(mean, estimator):
    """
    Description:
        Calculates the Sum of Squared Errors (SSE) of an estimation on
        flat numpy ndarrays.
    Parameters:
        mean:      actual value (numpy ndarray)
        estimator: estimated value of the mean (numpy ndarray)
    """
    diff = np.asarray(estimator) - np.asarray(mean)
    return np.sum(diff * diff, axis=0)
869,383
Description: Computes an additional value for the objective function value when used in an unconstrained optimization formulation. Parameters: params: all parameters for the Plackett-Luce mixture model (numpy ndarray) c: constant multiplier scaling factor of the returned term
def uncons_term(params, c):
    """
    Description:
        Computes an additional penalty value for the objective function
        when used in an unconstrained optimization formulation: each
        mixture's support parameters are pushed to sum to 1.
    Parameters:
        params: all parameters for the Plackett-Luce mixture model
                (numpy ndarray); params[1:5] and params[5:] are the two
                mixture components
        c:      constant multiplier scaling factor of the returned term
    """
    penalty_a = (np.sum(params[1:5]) - 1) ** 2
    penalty_b = (np.sum(params[5:]) - 1) ** 2
    return c * penalty_a + c * penalty_b
869,404
Description: Top 2 alternatives 12 moment conditions objective function Parameters: params: all parameters for the Plackett-Luce mixture model (numpy ndarray) moments: values of the moment conditions from the data (numpy ndarray)
def top2_reduced(params, moments): params = np.asarray(params) alpha = params[0] a = params[1:5] b = params[5:] p = np.asarray(moments) p1 = alpha*a+(1-alpha)*b-p[:4] p21 = alpha*a[0]*a[2:]/(1-a[0])+(1-alpha)*b[0]*b[2:]/(1-b[0])-p[4:6] p22 = alpha*a[1]*np.hstack((a[0],a[3]))/(1-a[1]...
869,405
Description: Top 3 alternatives 16 moment conditions objective function Parameters: params: all parameters for the Plackett-Luce mixture model (numpy ndarray) moments: values of the moment conditions from the data (numpy ndarray)
def top3_reduced(params, moments): params = np.asarray(params) alpha = params[0] a = params[1:5] b = params[5:] p = np.asarray(moments) p1 = alpha*a+(1-alpha)*b-p[:4] p21 = alpha*a[0]*a[2:]/(1-a[0])+(1-alpha)*b[0]*b[2:]/(1-b[0])-p[4:6] p22 = alpha*a[1]*np.hstack((a[0],a[3]))/(1-a[1]...
869,406
Description: Top m - 1 alternatives m(m - 1) + 2m moment conditions objective function Parameters: params: all parameters for the Plackett-Luce mixture model (numpy ndarray) moments: values of the moment conditions from the data (numpy ndarray)
def top3_full(params, moments): #variables params = np.asarray(params) #convert numpy matrix to list alpha = params[0] #first parameter is the alpha value half = int((len(params) - 1) / 2) #assuming 2 mixtures a = params[1:half + 1] #first mixture b = params[half + 1:] #second mixture p...
869,407
Rename the root timer (regardless of current timing level). Args: name (any): Identifier, passed through str() Returns: str: Implemented identifier.
def rename_root(name):
    """Rename the root timer (regardless of current timing level).

    Args:
        name (any): Identifier, passed through str().

    Returns:
        str: Implemented identifier.
    """
    label = str(name)
    # Keep the root timer and its times record in agreement.
    for target in (f.root, f.root.times):
        target.name = label
    return label
869,418
Adjust the root timer save_itrs setting, such as for use in multiprocessing, when a root timer may become a parallel subdivision (see subdivide()). Args: setting (bool): Save individual iterations data, passed through bool() Returns: bool: Implemented setting value.
def set_save_itrs_root(setting):
    """Adjust the root timer save_itrs setting, such as for use in
    multiprocessing, when a root timer may become a parallel subdivision
    (see subdivide()).

    Args:
        setting (bool): Save individual iterations data, passed through bool().

    Returns:
        bool: Implemented setting value.
    """
    flag = bool(setting)
    f.root.times.save_itrs = flag
    return flag
869,419
Register stamps with the root timer (see subdivision()). Args: rgstr_stamps (list, tuple): Collection of identifiers, passed through set(), then each is passed through str(). Returns: list: Implemented registered stamp collection.
def rgstr_stamps_root(rgstr_stamps):
    """Register stamps with the root timer (see subdivision()).

    Args:
        rgstr_stamps (list, tuple): Collection of identifiers, passed
            through set(), then each is passed through str().

    Returns:
        list: Implemented registered stamp collection.
    """
    sanitized = sanitize_rgstr_stamps(rgstr_stamps)
    f.root.rgstr_stamps = sanitized
    return sanitized
869,420
Description: Generate a Plackett-Luce dataset and save it to disk. Parameters: n: number of votes to generate m: number of alternatives outfile: open file object to which the dataset is written useDirichlet: boolean flag to use the Di...
def _generate_pl_dataset(n, m, outfile, useDirichlet): gamma, votes = generate_pl_dataset(n, m, useDirichlet) outfile.write(str(len(gamma)) + ',' + str(len(votes)) + '\n') outfile.write(','.join(map(str, gamma)) + '\n') for vote in votes: outfile.write(','.join(map(str, vote)) + '\n...
869,427
Description: Generate a Plackett-Luce dataset and return the parameters and votes Parameters: n: number of votes to generate m: number of alternatives useDirichlet: boolean flag to use the Dirichlet distribution
def generate_pl_dataset(n, m, useDirichlet=True): gamma = None if useDirichlet: gamma = np.random.dirichlet(np.ones(m)) else: gamma = np.random.rand(m) gamma /= np.sum(gamma) # normalize sum to 1.0 (not needed for Dirichlet) votes = [] for i in range(n): # gener...
869,428
Description: Read from disk a Plackett-Luce dataset. Parameters: infile: open file object from which to read the dataset
def read_pl_dataset(infile): m, n = [int(i) for i in infile.readline().split(',')] gamma = np.array([float(f) for f in infile.readline().split(',')]) if len(gamma) != m: infile.close() raise ValueError("malformed file: len(gamma) != m") votes = [] i = 0 for line i...
869,429
Description: Generate a Plackett-Luce vote given the model parameters. Parameters: m: number of alternatives gamma: parameters of the Plackett-Luce model
def draw_pl_vote(m, gamma): localgamma = np.copy(gamma) # work on a copy of gamma localalts = np.arange(m) # enumeration of the candidates vote = [] for j in range(m): # generate position in vote for every alternative # transform local gamma into intervals up to 1.0 local...
869,430
Description: Generate a Mixture of 2 Plackett-Luce models dataset and save it to disk. Parameters: n: number of votes to generate m: number of alternatives outfile: open file object to which the dataset is written useDirichlet: boolea...
def _generate_mix2pl_dataset(n, m, outfile, useDirichlet=True):
    """
    Description:
        Generate a Mixture of 2 Plackett-Luce models dataset and save it
        to disk.
    Parameters:
        n:            number of votes to generate
        m:            number of alternatives
        outfile:      open file object to which the dataset is written
        useDirichlet: boolean flag to use the Dirichlet distribution
    """
    params, votes = generate_mix2pl_dataset(n, m, useDirichlet)
    # Header: alternative count, then vote count.
    outfile.write(f"{m},{n}\n")
    # Second line: the full parameter vector.
    outfile.write(','.join(map(str, params)) + '\n')
    # One comma-separated line per vote.
    for ballot in votes:
        outfile.write(','.join(map(str, ballot)) + '\n')
869,431
Description: Read from disk a Mixture of 2 Plackett-Luce models dataset. Parameters: infile: open file object from which to read the dataset numVotes: number of votes to read from the file or all if None
def read_mix2pl_dataset(infile, numVotes=None): m, n = [int(i) for i in infile.readline().split(',')] if numVotes is not None and n < numVotes: raise ValueError("invalid number of votes to read: exceeds file amount") params = np.array([float(f) for f in infile.readline().split(',')]) ...
869,432
Description: Generate a mixture of 2 Plackett-Luce models dataset and return the parameters and votes. Parameters: n: number of votes to generate m: number of alternatives useDirichlet: boolean flag to use the Dirichlet distribution
def generate_mix2pl_dataset(n, m, useDirichlet=True): alpha = np.random.rand() gamma1 = None gamma2 = None if useDirichlet: gamma1 = np.random.dirichlet(np.ones(m)) gamma2 = np.random.dirichlet(np.ones(m)) else: gamma1 = np.random.rand(m) gamma1 /= ...
869,433
Description: Initializes the aggregator with the set of alternatives and the number of candidates Parameters: alts_list: the set of integer alternatives (a.k.a candidates)
def __init__(self, alts_list): self.alts = alts_list self.alts_set = set(alts_list) self.m = len(alts_list) if len(self.alts) != len(self.alts_set): raise ValueError("Alternatives must not contain duplicates") self.alts_to_ranks = None # Maps alternati...
869,578
Description: Returns the ranking of a given alternative in the computed aggregate ranking. An error is thrown if the alternative does not exist. The ranking is the index in the aggregate ranking, which is 0-indexed. Parameters: alt: the key tha...
def get_ranking(self, alt): if self.alts_to_ranks is None: raise ValueError("Aggregate ranking must be created first") try: rank = self.alts_to_ranks[alt] return rank except KeyError: raise KeyError("No alternative \"{}\" found in ...
869,579
Description: Takes in the scores of the alternatives in the form alt:score and generates the dictionaries mapping alternatives to rankings and rankings to alternatives. Parameters: alt_scores: dictionary of the scores of every alternative
def create_rank_dicts(self, alt_scores): self.alts_to_ranks = dict() cur_score = max(alt_scores.values()) cur_rank = 0 self.ranks_to_alts = {cur_rank:[]} for i in sorted(alt_scores.keys(), key=lambda x: -alt_scores[x]): if alt_scores[i] == cur_score: ...
869,581
Handles Datastore response errors according to their documentation. Parameters: error(dict) Returns: int or None: The max number of times this error should be retried or None if it shouldn't. See also: https://cloud.google.com/datastore/docs/concepts/er...
def _max_retries_for_error(self, error):
    """Handle Datastore response errors according to their documentation.

    Parameters:
        error(dict): The decoded error response; only the "status" key
            is read here.

    Returns:
        int or None: The max number of times this error should be
        retried, or None if it shouldn't be.

    See also:
        https://cloud.google.com/datastore/docs/concepts/errors
    """
    status = error.get("status")
    if status == "ABORTED" and get_transactions() > 0:
        # Avoids retrying Conflicts when inside a transaction.
        return None
    # Unknown statuses fall through to None via dict.get.
    return self._MAX_RETRIES.get(status)
869,635
Initialize services without authenticating to Globus Auth. Note: Clients may have reduced functionality without authentication. Arguments: services (str or list of str): The services to initialize clients for. Returns: dict: The clients requested, indexed by service name.
def anonymous_login(services): if isinstance(services, str): services = [services] clients = {} # Initialize valid services for serv in services: try: clients[serv] = KNOWN_CLIENTS[serv](http_timeout=STD_TIMEOUT) except KeyError: # No known client p...
869,642
Remove ALL tokens in the token directory. This will force re-authentication to all services. Arguments: token_dir (str): The path to the directory to save tokens in and look for credentials by default. If this argument was given to a ``login()`` function, the same value ...
def logout(token_dir=DEFAULT_CRED_PATH): for f in os.listdir(token_dir): if f.endswith("tokens.json"): try: os.remove(os.path.join(token_dir, f)) except OSError as e: # Eat ENOENT (no such file/dir, tokens already deleted) only, # ...
869,643
Translate a known Globus Search index into the index UUID. The UUID is the proper way to access indices, and will eventually be the only way. This method will return names it cannot disambiguate. Arguments: index_name (str): The name of the index. Returns: str: The UUID of the index. I...
def translate_index(index_name): uuid = SEARCH_INDEX_UUIDS.get(index_name.strip().lower()) if not uuid: try: index_info = globus_sdk.SearchClient().get_index(index_name).data if not isinstance(index_info, dict): raise ValueError("Multiple UUIDs possible") ...
869,647
Generate a filename like Google for a song based on metadata. Parameters: metadata (~collections.abc.Mapping): A metadata dict. Returns: str: A filename string without an extension.
def suggest_filename(metadata): if 'title' in metadata and 'track_number' in metadata: # Music Manager. suggested_filename = f"{metadata['track_number']:0>2} {metadata['title']}" elif 'title' in metadata and 'trackNumber' in metadata: # Mobile. suggested_filename = f"{metadata['trackNumber']:0>2} {metadata['...
869,700
Clean up a query string for searching. Removes unmatched parentheses and joining operators. Arguments: q (str): Query string to be cleaned Returns: str: The clean query string.
def _clean_query_string(q): q = q.replace("()", "").strip() if q.endswith("("): q = q[:-1].strip() # Remove misplaced AND/OR/NOT at end if q[-3:] == "AND" or q[-3:] == "NOT": q = q[:-3] elif q[-2:] == "OR": q = q[:-2] # Balance parentheses while q.count("(") > q...
869,793
Validate and clean up a query to be sent to Search. Cleans the query string, removes unneeded parameters, and validates for correctness. Does not modify the original argument. Raises an Exception on invalid input. Arguments: query (dict): The query to validate. Returns: dict: The v...
def _validate_query(query): query = deepcopy(query) # q is always required if query["q"] == BLANK_QUERY["q"]: raise ValueError("No query specified.") query["q"] = _clean_query_string(query["q"]) # limit should be set to appropriate default if not specified if query["limit"] is Non...
869,794
Add a term to the query. Arguments: term (str): The term to add. Returns: SearchHelper: Self
def _term(self, term): # All terms must be strings for Elasticsearch term = str(term) if term: self.__query["q"] += term return self
869,796
Add a ``field:value`` term to the query. Matches will have the ``value`` in the ``field``. Note: This method triggers advanced mode. Arguments: field (str): The field to check for the value, in Elasticsearch dot syntax. value (str): The value to match. ...
def _field(self, field, value): # Fields and values must be strings for Elasticsearch field = str(field) value = str(value) # Check if quotes required and allowed, and quotes not present # If the user adds improper double-quotes, this will not fix them if (any([...
869,797
Sort the search results by a certain field. If this method is called multiple times, the later sort fields are given lower priority, and will only be considered when the earlier fields have the same value. Arguments: field (str): The field to sort by, in Elasticsearch dot syntax. ...
def _add_sort(self, field, ascending=True): # Fields must be strings for Elasticsearch field = str(field) # No-op on blank sort field if field: self.__query["sort"].append({ 'field_name': field, 'order': 'asc' if ascending else 'desc' ...
869,801
Retrieve and return the mapping for the given metadata block. Arguments: block (str): The top-level field to fetch the mapping for (for example, ``"mdf"``), or the special values ``None`` for everything or ``"top"`` for just the top-level fields. ...
def show_fields(self, block=None): mapping = self._mapping() if block is None: return mapping elif block == "top": blocks = set() for key in mapping.keys(): blocks.add(key.split(".")[0]) block_map = {} for b in ...
869,814
Description: Top 2 alternatives 12 moment conditions values calculation Parameters: votes: ordinal preference data (numpy ndarray of integers)
def top2_reduced(votes): res = np.zeros(12) for vote in votes: # the top ranked alternative is in vote[0][0], second in vote[1][0] if vote[0][0] == 0: # i.e. the first alt is ranked first res[0] += 1 if vote[1][0] == 2: res[4] += 1 elif vo...
869,908
Description: Top 2 alternatives 16 moment conditions values calculation Parameters: votes: ordinal preference data (numpy ndarray of integers)
def top2_full(votes): res = np.zeros(16) for vote in votes: # the top ranked alternative is in vote[0][0], second in vote[1][0] if vote[0][0] == 0: # i.e. the first alt is ranked first res[0] += 1 if vote[1][0] == 1: # i.e. the second alt is ranked second ...
869,909
Description: Top 3 alternatives 16 moment conditions values calculation Parameters: votes: ordinal preference data (numpy ndarray of integers)
def top3_reduced(votes): res = np.zeros(16) for vote in votes: # the top ranked alternative is in vote[0][0], second in vote[1][0] if vote[0][0] == 0: # i.e. the first alt is ranked first res[0] += 1 if vote[1][0] == 2: res[4] += 1 elif vo...
869,910
Description: Top m - 1 alternatives q = m(m - 1) + 2m moment conditions values calculation Parameters: votes: ordinal preference data (numpy ndarray of integers)
def top3_full(votes): #create array of zeros, length = q res = np.zeros(2 * len(votes[0]) + (len(votes[0]) * (len(votes[0]) - 1))) #iterate through each vote for vote in votes: #set verification boolean to true ver = True #check if vote belongs to c1 < c2 < c3, c2 < c3 < c1...
869,911
Produce a formatted record of a times data structure. Args: times (Times, optional): If not provided, uses the current root timer. Returns: str: Timer tree hierarchy in a formatted string. Raises: TypeError: If provided argument is not a Times object.
def write_structure(times=None):
    """Produce a formatted record of a times data structure.

    Args:
        times (Times, optional): If not provided, uses the current root
            timer's times.

    Returns:
        str: Timer tree hierarchy in a formatted string.

    Raises:
        TypeError: If provided argument is not a Times object.
    """
    if times is None:
        times = f.root.times
    elif not isinstance(times, Times):
        raise TypeError("Expected Times instance for param 'times' (default is root).")
    return report_loc.write_structure(times)
870,007
Description: Returns the first index of the array (vector) x containing the value i. Parameters: x: one-dimensional array i: search value
def get_index_nested(x, i):
    """
    Description:
        Returns the first index of the array (vector) x containing the
        value i, or -1 if the value is not present.
    Parameters:
        x: one-dimensional array
        i: search value
    """
    # enumerate replaces the range(len(x)) indexing idiom: one lookup
    # per element and works for any sequence type.
    for idx, value in enumerate(x):
        if value == i:
            return idx
    return -1
870,694
Serialize and / or save a Times data object using pickle (cPickle). Args: filename (None, optional): Filename to dump to. If not provided, returns serialized object. times (None, optional): object to dump. If non provided, uses current root. Returns: pkl: Pickl...
def save_pkl(filename=None, times=None): if times is None: if not f.root.stopped: times = collapse.collapse_times() else: times = f.root.times else: if isinstance(times, (list, tuple)): for t in times: if not isinstance(t, Times): ...
871,280
Unpickle file contents. Args: filenames (str): Can be one or a list or tuple of filenames to retrieve. Returns: Times: A single object, or from a collection of filenames, a list of Times objects. Raises: TypeError: If any loaded object is not a Times object.
def load_pkl(filenames): if not isinstance(filenames, (list, tuple)): filenames = [filenames] times = [] for name in filenames: name = str(name) with open(name, 'rb') as file: loaded_obj = pickle.load(file) if not isinstance(loaded_obj, Times): ...
871,281
Create the main window. Args: size (tuple): The width and height of the window. samples (int): The number of samples. Keyword Args: fullscreen (bool): Fullscreen? title (bool): The title of the window. threaded (bool): Threaded? Retu...
def create_window(size=None, samples=16, *, fullscreen=False, title=None, threaded=True) -> Window: if size is None: width, height = 1280, 720 else: width, height = size if samples < 0 or (samples & (samples - 1)) != 0: raise Exception('Invalid number of samples: %d' % sample...
871,407
r"""Provides a way for each connection wrapper to handle error responses. Parameters: response(Response): An instance of :class:`.requests.Response`. retries(int): The number of times :meth:`.request` has been called so far. \**kwargs: The parameters with which...
def _handle_response_error(self, response, retries, **kwargs): r error = self._convert_response_to_error(response) if error is None: return response max_retries = self._max_retries_for_error(error) if max_retries is None or retries >= max_retries: return ...
871,430
Subclasses may override this method in order to influence how errors are parsed from the response. Parameters: response(Response): The response object. Returns: object or None: Any object for which a max retry count can be retrieved or None if the error cannot be ...
def _convert_response_to_error(self, response): content_type = response.headers.get("content-type", "") if "application/x-protobuf" in content_type: self.logger.debug("Decoding protobuf response.") data = status_pb2.Status.FromString(response.content) status ...
871,431
Receives a list of events and transmits them to Riemann Arguments: events -- list of `tensor.objects.Event`
def eventsReceived(self, events):
    """Receives a list of events and queues them for transmission to Riemann.

    Arguments:
        events -- list of `tensor.objects.Event`
    """
    unbounded = self.maxsize < 1
    has_room = len(self.events) < self.maxsize
    # Drop the batch only when the queue is bounded and already full.
    if unbounded or has_room:
        self.events.extend(events)
871,671
Description: Full breaking Parameters: k: not used
def _full(self, k): G = np.ones((self.m, self.m)) #np.fill_diagonal(G, 0) # erroneous code from prefpy return G
871,778
Description: Top k breaking Parameters: k: the number of alternatives to break from highest rank
def _top(self, k): if k > self.m: raise ValueError("k larger than the number of alternatives") G = np.ones((self.m, self.m)) #np.fill_diagonal(G, 0) # erroneous code from prefpy for i in range(self.m): for j in range(self.m): if i...
871,779
Description: Bottom k breaking Parameters: k: the number of alternatives to break from lowest rank
def _bot(self, k): if k < 2: raise ValueError("k smaller than 2") G = np.ones((self.m, self.m)) np.fill_diagonal(G, 0) for i in range(self.m): for j in range(self.m): if i == j: continue if i <...
871,780
Description: Position k breaking Parameters: k: position k is used for the breaking
def _pos(self, k): if k < 2: raise ValueError("k smaller than 2") G = np.zeros((self.m, self.m)) for i in range(self.m): for j in range(self.m): if i == j: continue if i < k or j < k: ...
871,782
Description: Takes in a set of rankings and computes the Plackett-Luce model aggregate ranking. Parameters: rankings: set of rankings to aggregate breaking: type of breaking to use k: number to be used for top, bottom, and position breakin...
def aggregate(self, rankings, breaking="full", k=None): breakings = { "full": self._full, "top": self._top, "bottom": self._bot, "adjacent": self._adj, "position": self._pos } if (k == ...
871,783
Description: Minorization-Maximization algorithm which returns an estimate of the ground-truth parameters, gamma for the given data. Parameters: rankings: set of rankings to aggregate epsilon: convergence condition value, set to None for itera...
def aggregate(self, rankings, epsilon, max_iters): # compute the matrix w, the numbers of pairwise wins: w = np.zeros((self.m, self.m)) for ranking in rankings: localw = np.zeros((self.m, self.m)) for ind1, alt1 in enumerate(self.alts): f...
871,817
Usage: var list var delete NAMES var NAME=VALUE var NAME Arguments: NAME Name of the variable NAMES Names of the variable separated by spaces VALUE VALUE to be assigned special vars date and time are defined
def do_var(self, arg, arguments): if arguments['list'] or arg == '' or arg is None: self._list_variables() return elif arguments['NAME=VALUE'] and "=" in arguments["NAME=VALUE"]: (variable, value) = arg.split('=', 1) if value == "time" or value =...
871,897
cm. Usage: cm [-q] help cm [-v] [-b] [--file=SCRIPT] [-i] [COMMAND ...] Arguments: COMMAND A command to be executed Options: --file=SCRIPT -f SCRIPT Executes the script -i After start keep the shell interactive, ot...
def main(): echo = False try: arguments = docopt(main.__doc__, help=True) # fixing the help parameter parsing if arguments['help']: arguments['COMMAND'] = ['help'] arguments['help'] = 'False' script_file = arguments['--file'] interactiv...
871,958
Constructor. Initializes various variables, setup the HTTP handler and stores all values Args: prefix: The prefix of the urls. Raises: AttributeError: if not all values for parameters in `url_fields` are passed
def __init__(self, prefix='/api/2/', **url_values): self._http = registry.http_handler self._prefix = prefix self._modified_fields = {} self._populated_fields = {} for field in url_values: if field in self.url_fields: setattr(self, field, url...
871,986
Resolve a value from :attr:`resolver_dict` based on the :attr:`data_format`. Args: data_format (:class:`~.DataFormat` or str): The data format; must be a member of :class:`~.DataFormat` or a string equivalent. resolver_dict (dict): the resolving dict. Can hold any value ...
def _data_format_resolver(data_format, resolver_dict): try: data_format = DataFormat(data_format) except ValueError: supported_formats = ', '.join( ["'{}'".format(f.value) for f in DataFormat]) raise ValueError(("'data_format' must be one of {formats}. Given " ...
872,030
:: Usage: man COMMAND man [--noheader] Options: --norule no rst header Arguments: COMMAND the command to be printed Description: man Prints ou...
def do_man(self, args, arguments): if arguments['COMMAND'] is None: print print "Commands" print 70 * "=" commands = [k for k in dir(self) if k.startswith("do_")] commands.sort() else: print arguments command...
872,119
Return text in camelCase style. Args: text: input string to convert case detect_acronyms: should attempt to detect acronyms acronyms: a list of acronyms to detect >>> camelcase("hello world") 'helloWorld' >>> camelcase("HELLO_HTML_WORLD", True, ["HTML"]) 'helloHTMLWorld'
def camelcase(text, acronyms=None):
    """Return text in camelCase style.

    Args:
        text: input string to convert case
        acronyms: a list of acronyms to detect

    >>> camelcase("hello world")
    'helloWorld'
    """
    words, _case, _sep = case_parse.parse_case(text, acronyms)
    if not words:
        return ''
    # First word lowercased, remaining words kept as parsed.
    return words[0].lower() + ''.join(words[1:])
872,455
Return text in PascalCase style (aka MixedCase). Args: text: input string to convert case detect_acronyms: should attempt to detect acronyms acronyms: a list of acronyms to detect >>> pascalcase("hello world") 'HelloWorld' >>> pascalcase("HELLO_HTML_WORLD", True, ["HTML"]) ...
def pascalcase(text, acronyms=None):
    """Return text in PascalCase style (aka MixedCase).

    Args:
        text: input string to convert case
        acronyms: a list of acronyms to detect

    >>> pascalcase("hello world")
    'HelloWorld'
    """
    parsed = case_parse.parse_case(text, acronyms)
    # parse_case returns (words, case, sep); only the words are needed.
    return ''.join(parsed[0])
872,456
Return text in CONST_CASE style (aka SCREAMING_SNAKE_CASE). Args: text: input string to convert case detect_acronyms: should attempt to detect acronyms acronyms: a list of acronyms to detect >>> constcase("hello world") 'HELLO_WORLD' >>> constcase("helloHTMLWorld", True, ["HTML...
def constcase(text, acronyms=None):
    """Return text in CONST_CASE style (aka SCREAMING_SNAKE_CASE).

    Args:
        text: input string to convert case
        acronyms: a list of acronyms to detect

    >>> constcase("hello world")
    'HELLO_WORLD'
    """
    words, _case, _sep = case_parse.parse_case(text, acronyms)
    return '_'.join(word.upper() for word in words)
872,457
Return text in dot.case style. Args: text: input string to convert case detect_acronyms: should attempt to detect acronyms acronyms: a list of acronyms to detect >>> dotcase("hello world") 'hello.world' >>> dotcase("helloHTMLWorld", True, ["HTML"]) 'hello.html.world'
def dotcase(text, acronyms=None):
    """Return text in dot.case style.

    Args:
        text: input string to convert case
        acronyms: a list of acronyms to detect

    >>> dotcase("hello world")
    'hello.world'
    """
    words, _case, _sep = case_parse.parse_case(text, acronyms)
    return '.'.join(word.lower() for word in words)
872,458
Return text in "separate words" style. Args: text: input string to convert case detect_acronyms: should attempt to detect acronyms acronyms: a list of acronyms to detect >>> separate_words("HELLO_WORLD") 'HELLO WORLD' >>> separate_words("helloHTMLWorld", True, ["HTML"]) 'he...
def separate_words(text, acronyms=None):
    """Return text in "separate words" style (original casing preserved).

    Args:
        text: input string to convert case
        acronyms: a list of acronyms to detect

    >>> separate_words("HELLO_WORLD")
    'HELLO WORLD'
    """
    parsed_words, _case, _sep = case_parse.parse_case(
        text, acronyms, preserve_case=True)
    return ' '.join(parsed_words)
872,459
Segment string on separator into list of words. Arguments: string -- the string we want to process Returns: words -- list of words the string was split into separator -- the separator char intersecting words was_upper -- whether the string happened to be upper-case
def _separate_words(string): words = [] separator = "" # Index of current character. Initially 1 because we don't want to check # if the 0th character is a boundary. i = 1 # Index of first character in a sequence s = 0 # Previous character. p = string[0:1] # Treat an all-c...
872,541
:: Usage: edit FILENAME Edits the file with the given name Arguments: FILENAME the file to edit
def do_edit(self, arg, arguments): def _create_file(filename): if not os.path.exists(filename): file(filename, 'w+').close() def _edit(prefix, editors, filename): for editor in editors: if os.path.exists(editor): _cre...
872,665
:: Usage: graphviz FILENAME Export the data in cvs format to a file. Former cvs command Arguments: FILENAME The filename
def do_graphviz(self, args, arguments):
    """Open the given file in Graphviz.app (macOS only).

    Args:
        args: Raw argument string from the command shell (unused here).
        arguments: Parsed argument dict; reads arguments['FILENAME'].
    """
    filename = arguments['FILENAME']
    # Only implemented for macOS; other platforms are silently a no-op.
    if platform.system() == 'Darwin':
        if os.path.isfile(filename):
            # NOTE(review): the '\'' sequences look like shell-escaping
            # artifacts from an earlier transformation — confirm the
            # intended quoting. Also, building a shell string from a
            # user-supplied filename is an injection risk; consider
            # subprocess.run with a list argument instead of os.system.
            os.system("open -a '\''/Applications/Graphviz.app'\'' " + filename)
872,818
:: Usage: dot2 FILENAME FORMAT Export the data in cvs format to a file. Former cvs command Arguments: FILENAME The filename FORMAT the export format, pdf, png, ...
def do_dot2(self, args, arguments): filename = arguments['FILENAME'] output_format = arguments['FORMAT'] base = filename.replace(".dot", "") out = base + "." + output_format if output_format == "pdf": exec_command = "dot -Tps %s | epstopdf --filter --ooutput ...
872,819
Start sirbot Configure sirbot and start the aiohttp.web.Application Args: host (str): host port (int): port
def run(self, host: str = '0.0.0.0', port: int = 8080):
    """Start sirbot: configure plugins, then serve the aiohttp application.

    Args:
        host (str): Interface to bind; defaults to all interfaces.
        port (int): TCP port to listen on.
    """
    # Plugins are configured to completion before the web app starts
    # serving; web.run_app then blocks until shutdown.
    self._loop.run_until_complete(self._configure_plugins())
    web.run_app(self._app, host=host, port=port)
872,998
Table Constructor todo::make sure this is memory efficient Args: Index (Index): An Index object with a valid .query method and a .columns attribute. Returns: A table object Usage example >>> Table(ind)
def __init__(self, index, port = 8081): self.index = index self.server = None self.port = port if port else find_free_port() self.settings = index.columns self.docs = index.docs self._create_settings() self.html_path = get_cur_path()+'/data/table/' ...
872,999
Initializer for the base class. Save the hostname to use for all requests as well as any authentication info needed. Args: hostname: The host for the requests. auth: The authentication info needed for any requests.
def __init__(self, hostname, auth=AnonymousAuth()):
    """Initializer for the base class.

    Save the hostname to use for all requests as well as any
    authentication info needed.

    Args:
        hostname: The host for the requests; normalized to a full
            scheme-qualified hostname.
        auth: The authentication info needed for any requests.
            NOTE(review): this default AnonymousAuth() instance is
            created once at definition time and shared across all
            calls — confirm it is stateless.
    """
    self._hostname = self._construct_full_hostname(hostname)
    _logger.debug("Hostname is %s" % self._hostname)
    self._auth_info = auth
873,136
Create a full (scheme included) hostname from the argument given. Only HTTP and HTTP+SSL protocols are allowed. Args: hostname: The hostname to use. Returns: The full hostname. Raises: ValueError: A not supported protocol is used.
def _construct_full_hostname(self, hostname): if hostname.startswith(('http://', 'https://', )): return hostname if '://' in hostname: protocol, host = hostname.split('://', 1) raise ValueError('Protocol %s is not supported.' % protocol) return '://'....
873,137
Add the authentication info to the supplied dictionary. We use the `requests.HTTPBasicAuth` class as the `auth` param. Args: `request_args`: The arguments that will be passed to the request. Returns: The updated arguments for the request.
def populate_request_data(self, request_args):
    """Add the authentication info to the supplied dictionary.

    We use the `requests.HTTPBasicAuth` class as the `auth` param.

    Args:
        request_args: The arguments that will be passed to the request.

    Returns:
        The updated arguments for the request.
    """
    credentials = HTTPBasicAuth(self._username, self._password)
    request_args['auth'] = credentials
    return request_args
873,321
:: Usage: exec FILENAME executes the commands in the file. See also the script command. Arguments: FILENAME The name of the file
def do_exec(self, filename): if not filename: Console.error("the command requires a filename as parameter") return if os.path.exists(filename): with open(filename, "r") as f: for line in f: Console.ok("> {:}".format(str(li...
873,600
MathList Constructor todo:: share a port among lists. Or maybe close the server after serving from it? Args: lst (list): A list of LaTeX math to be rendered by KaTeX Returns: A math list object Usage example >>> lst = ["\int x = y", "x + 6"] >>>...
def __init__(self, lst):
    """MathList constructor.

    Args:
        lst: A list of LaTeX math strings to be rendered by KaTeX.
    """
    list.__init__(self, lst)
    # Reserve a port up front; the server itself is started lazily.
    self.port = find_free_port()
    self.server = None
    self.html_path = get_cur_path() + '/data/math_list/index.html'
873,719
:: Usage: open FILENAME ARGUMENTS: FILENAME the file to open in the cwd if . is specified. If the file is in the cwd you must specify it with ./FILENAME Opens the given URL in a browser window.
def do_open(self, args, arguments): filename = arguments['FILENAME'] filename = self._expand_filename(filename) Console.ok("open {0}".format(filename)) if not (filename.startswith("file:") or filename.startswith("http:")): try: with open(filename): ...
873,868
Creates an entity in Mambu This method must be implemented in child classes Args: data (dictionary): dictionary with data to send, this dictionary is specific for each Mambu entity
def create(self, data, *args, **kwargs): # if module of the function is diferent from the module of the object # that means create is not implemented in child class if self.create.__func__.__module__ != self.__module__: raise Exception("Child method not implemented") ...
873,886
Make a request. Use the `requests` module to actually perform the request. Args: `method`: The method to use. `path`: The path to the resource. `data`: Any data to send (for POST and PUT requests). `kwargs`: Other parameters for `requests`. Retur...
def _make_request(self, method, path, data=None, **kwargs): _logger.debug("Method for request is %s" % method) url = self._construct_full_url(path) _logger.debug("URL for request is %s" % url) self._auth_info.populate_request_data(kwargs) _logger.debug("The arguments are...
874,033
Send data to a remote server, either with a POST or a PUT request. Args: `method`: The method (POST or PUT) to use. `path`: The path to the resource. `data`: The data to send. `filename`: The filename of the file to send (if any). Returns: The...
def _send(self, method, path, data, filename):
    """Send data to a remote server, either with a POST or a PUT request.

    Args:
        method: The method (POST or PUT) to use.
        path: The path to the resource.
        data: The data to send.
        filename: The filename of the file to send, or ``None`` for a
            plain JSON request.

    Returns:
        The content of the response.
    """
    # A filename switches the transport from application/json to a
    # multipart upload.
    if filename is not None:
        return self._send_file(method, path, data, filename)
    return self._send_json(method, path, data)
874,034
Make a application/json request. Args: `method`: The method of the request (POST or PUT). `path`: The path to the resource. `data`: The JSON-encoded data. Returns: The content of the response. Raises: An exception depending on the HTTP...
def _send_json(self, method, path, data):
    """Make an application/json request.

    Args:
        method: The method of the request (POST or PUT).
        path: The path to the resource.
        data: The JSON-encoded payload.

    Returns:
        The content of the response.
    """
    json_headers = {'Content-type': 'application/json'}
    return self._make_request(method, path, data=data, headers=json_headers)
874,035
Make a multipart/form-encoded request. Args: `method`: The method of the request (POST or PUT). `path`: The path to the resource. `data`: The JSON-encoded data. `filename`: The filename of the file to send. Returns: The content of the response...
def _send_file(self, method, path, data, filename):
    """Make a multipart/form-encoded request.

    Args:
        method: The method of the request (POST or PUT).
        path: The path to the resource.
        data: The JSON-encoded data.
        filename: The filename of the file to send.

    Returns:
        The content of the response.
    """
    # BUG FIX: open the upload in binary mode.  Text mode ('r') decodes the
    # file, which corrupts binary payloads and raises on non-UTF-8 files.
    # NOTE(review): ``files=[f, ]`` is an unusual shape for the underlying
    # HTTP library -- confirm _make_request expects a bare file list.
    with open(filename, 'rb') as f:
        return self._make_request(method, path, data=data, files=[f, ])
874,036
Execute rcon command on server and fetch result Args: command --- executed command timeout --- read timeout Returns: bytes response
def execute(self, command, timeout=1):
    """Run an rcon command on the server and fetch the result.

    Args:
        command: The command to execute.
        timeout: Read timeout in seconds.

    Returns:
        bytes: The raw response.
    """
    self.send(command)
    # ``read_untill`` is the (misspelled) name of the transport method.
    reply = self.read_untill(timeout)
    return reply
874,301
Extract index and watch from :class:`Blocking` Parameters: obj (Blocking): the blocking object Returns: tuple: index and watch
def extract_blocking(obj):
    """Extract index and watch from a :class:`Blocking` value.

    Args:
        obj: Either an ``(index, watch)`` tuple or a bare object whose
            index is read via ``extract_attr(..., keys=["Index"])``.

    Returns:
        tuple: index and watch.

    Raises:
        TypeError: If ``obj`` is a tuple without exactly two elements.
    """
    if isinstance(obj, tuple):
        # BUG FIX: the original used a bare ``except:``, which also swallows
        # KeyboardInterrupt/SystemExit.  A wrong-sized tuple can only raise
        # ValueError here, so catch exactly that.
        try:
            index, watch = obj
        except ValueError:
            raise TypeError("Not a Blocking object")
    else:
        index, watch = obj, None
    return extract_attr(index, keys=["Index"]), watch
874,394
Perform a request. Args: request_method: HTTP method for this request. api_method: API method name for this request. *args: Extra arguments to pass to the request. **kwargs: Extra keyword arguments to pass to the request. Returns: A dict cont...
def request(self, request_method, api_method, *args, **kwargs): url = self._build_url(api_method) resp = requests.request(request_method, url, *args, **kwargs) try: rv = resp.json() except ValueError: raise RequestFailedError(resp, 'not a json body') ...
874,754
Returns the specified key Parameters: dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. watch (Blocking): Do a blocking query consistency (Consistency): Force consistency
async def _read(self, path, *, raw=None, recurse=None, dc=None, separator=None, keys=None, watch=None, consistency=None): response = await self._api.get( ...
875,078
Returns the specified key Parameters: key (str): Key to fetch dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. watch (Blocking): Do a blocking query consistency (Consistency): Force consistency ...
async def raw(self, key, *, dc=None, watch=None, consistency=None): response = await self._read(key, dc=dc, raw=True, watch=watch, consistency=consistency) ...
875,081
Sets the key to the given value. Parameters: key (str): Key to set value (Payload): Value to set, It will be encoded by flags flags (int): Flags to set with value Returns: bool: ``True`` on success
async def set(self, key, value, *, flags=None):
    """Set the key to the given value.

    Args:
        key (str): Key to set.
        value (Payload): Value to set; it is encoded according to *flags*.
        flags (int): Flags to store with the value.

    Returns:
        bool: ``True`` on success.
    """
    payload = encode_value(value, flags)
    response = await self._write(key, payload, flags=flags)
    return response.body is True
875,083
Locks the Key with the given Session. Parameters: key (str): Key to set value (Payload): Value to set, It will be encoded by flags session (ObjectID): Session ID flags (int): Flags to set with value Response: bool: ``True`` on success ...
async def lock(self, key, value, *, flags=None, session): value = encode_value(value, flags) session_id = extract_attr(session, keys=["ID"]) response = await self._write(key, value, flags=flags, acquire=session_id...
875,085
Unlocks the Key with the given Session. Parameters: key (str): Key to set value (Payload): Value to set, It will be encoded by flags session (ObjectID): Session ID flags (int): Flags to set with value Response: bool: ``True`` on success ...
async def unlock(self, key, value, *, flags=None, session): value = encode_value(value, flags) session_id = extract_attr(session, keys=["ID"]) response = await self._write(key, value, flags=flags, release=session_...
875,086
Deletes the Key Parameters: key (str): Key to delete Response: bool: ``True`` on success
async def delete(self, key):
    """Delete the key.

    Args:
        key (str): Key to delete.

    Returns:
        bool: ``True`` on success.
    """
    result = await self._discard(key)
    return result.body is True
875,088
Deletes all keys with a prefix of Key. Parameters: key (str): Key to delete separator (str): Delete only up to a given separator Response: bool: ``True`` on success
async def delete_tree(self, prefix, *, separator=None):
    """Delete all keys with a prefix of *prefix*.

    Args:
        prefix (str): Key prefix to delete.
        separator (str): Delete only up to the given separator.

    Returns:
        bool: ``True`` on success.
    """
    result = await self._discard(prefix, recurse=True, separator=separator)
    return result.body is True
875,089
Deletes the Key with check-and-set semantics. Parameters: key (str): Key to delete index (ObjectIndex): Index ID Response: bool: ``True`` on success The Key will only be deleted if its current modify index matches the supplied Index.
async def delete_cas(self, key, *, index):
    """Delete the key with check-and-set semantics.

    The key will only be deleted if its current modify index matches the
    supplied index.

    Args:
        key (str): Key to delete.
        index (ObjectIndex): Index ID.

    Returns:
        bool: ``True`` on success.
    """
    cas_index = extract_attr(index, keys=["ModifyIndex", "Index"])
    response = await self._discard(key, cas=cas_index)
    return response.body is True
875,090
Sets the Key to the given Value Parameters: key (str): Key to set value (Payload): Value to set, It will be encoded by flags flags (int): Flags to set with value
def set(self, key, value, *, flags=None):
    """Queue a ``set`` operation on the transaction.

    Args:
        key (str): Key to set.
        value (Payload): Value to set; it is encoded according to *flags*.
        flags (int): Flags to store with the value.

    Returns:
        The transaction itself, for chaining.
    """
    encoded = encode_value(value, flags, base64=True).decode("utf-8")
    operation = {
        "Verb": "set",
        "Key": key,
        "Value": encoded,
        "Flags": flags,
    }
    self.append(operation)
    return self
875,093
Sets the Key to the given Value with check-and-set semantics Parameters: key (str): Key to set value (Payload): Value to set, It will be encoded by flags index (ObjectIndex): Index ID flags (int): Flags to set with value The Key will only be set if its c...
def cas(self, key, value, *, flags=None, index): self.append({ "Verb": "cas", "Key": key, "Value": encode_value(value, flags, base64=True).decode("utf-8"), "Flags": flags, "Index": extract_attr(index, keys=["ModifyIndex", "Index"]) }) ...
875,094
Locks the Key with the given Session Parameters: key (str): Key to set value (Payload): Value to set, It will be encoded by flags session (ObjectID): Session ID The Key will only be set if its current modify index matches the supplied Index
def lock(self, key, value, *, flags=None, session): self.append({ "Verb": "lock", "Key": key, "Value": encode_value(value, flags, base64=True).decode("utf-8"), "Flags": flags, "Session": extract_attr(session, keys=["ID"]) }) re...
875,095
Fails the transaction if Key does not have a modify index equal to Index Parameters: key (str): Key to check index (ObjectIndex): Index ID
def check_index(self, key, *, index):
    """Queue a ``check-index`` operation on the transaction.

    Fails the transaction if *key* does not have a modify index equal
    to *index*.

    Args:
        key (str): Key to check.
        index (ObjectIndex): Index ID.

    Returns:
        The transaction itself, for chaining.
    """
    operation = {
        "Verb": "check-index",
        "Key": key,
        "Index": extract_attr(index, keys=["ModifyIndex", "Index"]),
    }
    self.append(operation)
    return self
875,096
Fails the transaction if Key is not currently locked by Session Parameters: key (str): Key to check session (ObjectID): Session ID
def check_session(self, key, *, session=None):
    """Queue a ``check-session`` operation on the transaction.

    Fails the transaction if *key* is not currently locked by *session*.

    Args:
        key (str): Key to check.
        session (ObjectID): Session ID.

    Returns:
        The transaction itself, for chaining.
    """
    operation = {
        "Verb": "check-session",
        "Key": key,
        "Session": extract_attr(session, keys=["ID"]),
    }
    self.append(operation)
    return self
875,097
Deletes the Key with check-and-set semantics. Parameters: key (str): Key to delete index (ObjectIndex): Index ID The Key will only be deleted if its current modify index matches the supplied Index
def delete_cas(self, key, *, index):
    """Queue a ``delete-cas`` operation on the transaction.

    The key will only be deleted if its current modify index matches
    the supplied index.

    Args:
        key (str): Key to delete.
        index (ObjectIndex): Index ID.

    Returns:
        The transaction itself, for chaining.
    """
    operation = {
        "Verb": "delete-cas",
        "Key": key,
        "Index": extract_attr(index, keys=["ModifyIndex", "Index"]),
    }
    self.append(operation)
    return self
875,098
Execute stored operations Parameters: dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. token (ObjectID): Token ID Returns: Collection: Results of operations. Raises: TransactionError: ...
async def execute(self, dc=None, token=None): token_id = extract_attr(token, keys=["ID"]) try: response = await self._api.put( "/v1/txn", data=self.operations, params={ "dc": dc, "token": token_i...
875,099
Destroys a given session Parameters: session (ObjectID): Session ID dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. Returns: bool: ``True`` on success
async def destroy(self, session, *, dc=None):
    """Destroy a given session.

    Args:
        session (ObjectID): Session ID.
        dc (str): Datacenter to use; defaults to the agent's local
            datacenter.

    Returns:
        bool: ``True`` on success.
    """
    sid = extract_attr(session, keys=["ID"])
    response = await self._api.put(
        "/v1/session/destroy", sid, params={"dc": dc})
    return response.body is True
875,191