docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Returns an integer that has the value of the decimal string: dec_str*10^decimals Arguments: dec_str (string) that represents a decimal number decimals (int): number of decimals for creating the integer output Returns: (int) Raises: ValueError if dec_string is not a v...
def decstr2int(dec_str, decimals): if not isinstance(decimals, int): raise TypeError('decimals must be an integer') try: dollars, cents = dec_str.split('.') except ValueError: if '.' not in dec_str: dollars = dec_str cents = '0' else: ...
1,131,989
Deposits specified neopoints into the user's account, returns result Parameters: amount (int) -- Amount of neopoints to deposit Returns bool - True if successful, False otherwise Raises notEnoughNps
def deposit(self, amount): pg = self.usr.getPage("http://www.neopets.com/bank.phtml") if self.usr.nps < int(amount): raise notEnoughNps form = pg.form(action="process_bank.phtml") form.update({'type': 'deposit', 'amount': str(amount)}) f...
1,132,062
Withdraws specified neopoints from the user's account, returns result Parameters: amount (int) -- Amount of neopoints to withdraw Returns bool - True if successful, False otherwise Raises notEnoughBalance
def withdraw(self, amount): pg = self.usr.getPage("http://www.neopets.com/bank.phtml") try: results = pg.find(text = "Account Type:").parent.parent.parent.find_all("td", align="center") self.balance = results[1].text.replace(" NP", "") except Exception: ...
1,132,063
Returns the specified hook. Args: hook_name (str) Returns: str - (the content of) the hook Raises: HookNotFoundError
def get_hook(hook_name): if not pkg_resources.resource_exists(__name__, hook_name): raise HookNotFoundError return pkg_resources.resource_string(__name__, hook_name)
1,132,110
Returns ICachableItem that matches id Args: id: String that identifies the item to return whose key matches
def getById(self, Id): # we need to create a new object to insure we don't corrupt the generator count csvsource = CSVSource(self.source, self.factory, self.key()) try: for item in csvsource.items(): if Id == item.getId(): return item ...
1,132,420
Wrap a widget to conform with Bootstrap's html control design. Args: input_class: Class to give to the rendered <input> control. add_meta: bool:
def bootstrap_styled(cls=None, add_meta=True, form_group=True, input_class='form-control'): def real_decorator(cls): class NewClass(cls): pass NewClass.__name__ = cls.__name__ NewClass = custom_widget_wrapper(NewClass) _call = NewClass.__call__ def call(*args, **...
1,132,567
Walk on rule content tokens to return a dict of properties. This is pretty naive and will choke/fail on everything that is more evolved than simple ``ident(string):value(string)`` Arguments: rule (tinycss2.ast.QualifiedRule): Qualified rule object as returned by ti...
def digest_content(self, rule): data = OrderedDict() current_key = None for token in rule.content: # Assume first identity token is the property name if token.type == 'ident': # Ignore starting '-' from css variables name = token...
1,132,986
Parse source and consume tokens from tinycss2. Arguments: source (string): Source content to parse. Returns: dict: Retrieved rules.
def consume(self, source): manifest = OrderedDict() rules = parse_stylesheet( source, skip_comments=True, skip_whitespace=True, ) for rule in rules: # Gather rule selector+properties name = self.digest_prelude(rule) ...
1,132,987
Try to guess how much pages are in book listing. Args: dom (obj): HTMLElement container of the page with book list. Returns: int: Number of pages for given category.
def _get_max_page(dom): div = dom.find("div", {"class": "razeniKnihListovani"}) if not div: return 1 # isolate only page numbers from links links = div[0].find("a") max_page = filter( lambda x: "href" in x.params and "pageindex=" in x.params["href"], links ) ma...
1,133,118
Parse links to the details about publications from page with book list. Args: dom (obj): HTMLElement container of the page with book list. Returns: list: List of strings / absolute links to book details.
def _parse_book_links(dom): links = [] picker = lambda x: x.params.get("class", "").startswith("boxProKnihy") for el in dom.find(None, fn=picker): book_ref = el.find("a") if not book_ref or "href" not in book_ref[0].params: continue links.append(book_ref[0].params...
1,133,119
Go thru `links` to categories and return list to all publications in all given categories. Args: links (list): List of strings (absolute links to categories). Returns: list: List of strings / absolute links to book details.
def get_book_links(links): book_links = [] for link in links: data = DOWNER.download(link + "1") dom = dhtmlparser.parseString(data) book_links.extend(_parse_book_links(dom)) max_page = _get_max_page(dom) if max_page == 1: continue for i in ra...
1,133,120
Parse information about authors of the book. Args: dom (obj): HTMLElement containing slice of the page with details. Returns: list: List of :class:`.Author` objects. Blank if no author \ found.
def _parse_authors(authors): link = authors.find("a") link = link[0].params.get("href") if link else None author_list = _strip_content(authors) if "(" in author_list: author_list = author_list.split("(")[0] if not author_list.strip(): return [] return map( lambda...
1,133,121
Download and parse available information about the book from the publisher's webpages. Args: link (str): URL of the book at the publisher's webpages. Returns: obj: :class:`.Publication` instance with book details.
def _process_book(link): # download and parse book info data = DOWNER.download(link) dom = dhtmlparser.parseString( utils.handle_encodnig(data) ) dhtmlparser.makeDoubleLinked(dom) # some books are without price in expected elements, this will try to get # it from elsewhere ...
1,133,122
Serialize this keymap as a JSON formatted stream to the *fp*. Arguments: fp: A ``.write()``-supporting file-like object to write the generated JSON to (default is ``sys.stdout``). **kwargs: Options to be passed into :func:`json.dumps`.
def dump(self, fp=sys.stdout, **kwargs): fp.write(FILE_HEADER) fp.write(self.to_json(**kwargs)) fp.write('\n')
1,133,512
Append the given bindings to this keymap. Arguments: *bindings (Binding): Bindings to be added. Returns: Keymap: self
def extend(self, *bindings): self._bindings.extend(self._preprocess(bindings)) return self
1,133,513
Bind the keys to the specified *command* with some *args*. Arguments: command (str): Name of the ST command (e.g. ``insert_snippet``). **args: Arguments for the command. Returns: Binding: self
def to(self, command, **args): self.command = command self.args = args return self
1,133,518
Specify context, i.e. condition that must be met. Arguments: key (str): Name of the context whose value you want to query. Returns: Context:
def when(self, key): ctx = Context(key, self) self.context.append(ctx) return ctx
1,133,519
Initialize the object. Args: conf_path (str): See :attr:`conf_path`. project_key (str, default None): See :attr:`project_key`. If not set, the root of the database is used (this may cause performance issues). run_asyncore_thread (bool, default ...
def __init__(self, conf_path, project_key=None, run_asyncore_thread=True): self.conf_path = conf_path super(ZEOConfWrapper, self).__init__( project_key=project_key, run_asyncore_thread=run_asyncore_thread, )
1,133,568
Determine if parser argument is an existing file or directory. This technique comes from http://stackoverflow.com/a/11541450/95592 and from http://stackoverflow.com/a/11541495/95592 Args: arg: parser argument containing filename to be checked arg_type: string of either "file" or "directory...
def extant_item(arg, arg_type): if arg_type == "file": if not os.path.isfile(arg): raise argparse.ArgumentError( None, "The file {arg} does not exist.".format(arg=arg)) else: # File exists so return the filename return arg ...
1,133,588
Parse the args using the config_file, input_dir, output_dir pattern Args: args: sys.argv Returns: The populated namespace object from parser.parse_args(). Raises: TBD
def parse_config_input_output(args=sys.argv): parser = argparse.ArgumentParser( description='Process the input files using the given config') parser.add_argument( 'config_file', help='Configuration file.', metavar='FILE', type=extant_file) parser.add_argument( 'i...
1,133,589
Parse the args using the config_file pattern Args: args: sys.argv Returns: The populated namespace object from parser.parse_args(). Raises: TBD
def parse_config(args=sys.argv): parser = argparse.ArgumentParser( description='Read in the config file') parser.add_argument( 'config_file', help='Configuration file.', metavar='FILE', type=extant_file) return parser.parse_args(args[1:])
1,133,590
Returns current ICachedItem for ICachableItem Args: CachableItem: ICachableItem, used as a reference to find a cached version Returns: ICachedItem or None, if CachableItem has not been cached
def get(self, CachableItem): return self.session.\ query(self.mapper.factory().__class__).\ filter(self.mapper.factory().__class__.__dict__[self.mapper.key()]==CachableItem.getId()).\ first()
1,133,607
Formats a datatype value to a SPARQL representation args: item: the datatype object dt_format: the return format ['turtle', 'uri']
def format_sparql(item, dt_format='turtle', **kwargs): try: rtn_val = json.dumps(item.value) rtn_val = item.value except: if 'time' in item.class_type.lower() \ or 'date' in item.class_type.lower(): rtn_val = item.value.isoformat() else: ...
1,133,615
Formats the value in various formats args: method: ['sparql', 'json', 'pyuri'] dt_format: ['turtle','uri'] used in conjunction with the 'sparql' method
def _format(self, method="sparql", dt_format="turtle"): try: return __FORMAT_OPTIONS__[method](self, dt_format=dt_format) except KeyError: raise NotImplementedError("'{}' is not a valid format method" "".format(method))
1,133,616
cycles through the instantiated namespaces to see if it has already been created args: namespace: tuple of prefix and uri to check
def __is_new_ns__(cls, namespace): for ns in cls._ns_instances: if ns[0] == namespace[0] and ns[1] == namespace[1]: return ns elif ns[0] == namespace[0] and ns[1] != namespace[1]: raise NsPrefixExistsError(namespace, ...
1,133,620
Extends the function to add an attribute to the class for each added namespace to allow for use of dot notation. All prefixes are converted to lowercase Args: prefix: string of namespace name namespace: rdflib.namespace instance kwargs: calc: whether...
def bind(self, prefix, namespace, *args, **kwargs): # RdfNamespace(prefix, namespace, **kwargs) setattr(self, prefix, RdfNamespace(prefix, namespace, **kwargs)) if kwargs.pop('calc', True): self.__make_dicts__
1,133,628
Reads the beginning of a turtle file and sets the prefixes used in that file and sets the prefix attribute Args: filepath: the path to the turtle file file_encoding: specify a specific encoding if necessary
def load(self, filepath, file_encoding=None): with open(filepath, encoding=file_encoding) as inf: for line in inf: current_line = str(line).strip() if current_line.startswith("@prefix"): self._add_ttl_ns(current_line.replace("\n","")) ...
1,133,630
Reads a dictionary of namespaces and binds them to the manager Args: ns_dict: dictionary with the key as the prefix and the value as the uri
def dict_load(self, ns_dict): for prefix, uri in ns_dict.items(): self.bind(prefix, uri, override=False, calc=False) self.__make_dicts__
1,133,631
takes one prefix line from the turtle file and binds the namespace to the class Args: line: the turtle prefix line string
def _add_ttl_ns(self, line): lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3])) lg.setLevel(self.log_level) lg.debug("line:\n%s", line) line = str(line).strip() # if the line is not a prefix line exit if line is None or line == 'none' or line == ...
1,133,632
will remove a namespace ref from the manager. either Arg is optional. args: namespace: prefix, string or Namespace() to remove
def del_ns(self, namespace): # remove the item from the namespace dict namespace = str(namespace) attr_name = None if hasattr(self, namespace): delattr(self, namespace)
1,133,633
Converts py_uri or ttl uri to a http://... full uri format Args: value: the string to convert Returns: full uri of an abbreviated uri
def uri(self, value, strip_iri=True): return self.convert_to_uri(value, strip_iri=strip_iri)
1,133,634
converts a prefixed rdf ns equivalent value to its uri form. If not found returns the value as is args: value: the URI/IRI to convert strip_iri: removes the < and > signs rdflib_uri: returns an rdflib URIRef
def convert_to_uri(self, value, strip_iri=True): parsed = self.parse_uri(str(value)) try: new_uri = "%s%s" % (self.ns_dict[parsed[0]], parsed[1]) if not strip_iri: return self.iri(new_uri) return new_uri except KeyError: r...
1,133,635
takes an value and returns a tuple of the parts args: value: a uri in any form pyuri, ttl or full IRI
def get_uri_parts(self, value): if value.startswith('pyuri_'): value = self.rpyhttp(value) parts = self.parse_uri(value) try: return (self.ns_dict[parts[0]], parts[1]) except KeyError: try: return (self.ns_dict[parts[0].lower()...
1,133,636
converts a string to an IRI or returns an IRI if already formatted Args: uri_string: uri in string format Returns: formatted uri with <>
def iri(uri_string): uri_string = str(uri_string) if uri_string[:1] == "?": return uri_string if uri_string[:1] == "[": return uri_string if uri_string[:1] != "<": uri_string = "<{}".format(uri_string.strip()) if uri_string[len(uri_str...
1,133,639
converts a value to the prefixed rdf ns equivalent. If not found returns the value as is. args: value: the value to convert
def convert_to_ttl(self, value): parsed = self.parse_uri(value) try: rtn_val = "%s:%s" % (self.uri_dict[parsed[0]], parsed[1]) except KeyError: rtn_val = self.iri(self.rpyhttp(value)) return rtn_val
1,133,640
converts a value to the prefixed rdf ns equivalent. If not found returns the value as is args: value: the value to convert
def convert_to_ns(self, value): parsed = self.parse_uri(value) try: rtn_val = "%s_%s" % (self.uri_dict[parsed[0]], parsed[1]) except KeyError: rtn_val = self.pyhttp(value) return rtn_val
1,133,641
Parses a value into a head and tail pair based on finding the last '#' or '/' as is standard with URI formats args: value: string value to parse returns: tuple: (lookup, end)
def parse_uri(value): value = RdfNsManager.clean_iri(value) lookup = None end = None try: lookup = value[:value.rindex('#')+1] end = value[value.rindex('#')+1:] except ValueError: try: lookup = value[:value.rindex('/')...
1,133,642
method to send a message to a user Parameters: to -> recipient msg -> message to send label -> application description title -> name of the notification event uri -> callback uri
def send_notification(self, to=None, msg=None, label=None, title=None, uri=None): url = self.root_url + "send_notification" values = {} if to is not None: values["to"] = to if msg is not None: values["msg"] = msg if label...
1,133,683
method to send a message to a user Parameters: to -> recipient msg -> message to send
def send_message(self, to=None, msg=None): url = self.root_url + "send_message" values = {} if to is not None: values["to"] = to if msg is not None: values["msg"] = msg return self._query(url, values)
1,133,684
query method to do HTTP POST/GET Parameters: url -> the url to POST/GET data -> header_data as a dict (only for POST) Returns: Parsed JSON data as dict or None on error
def _query(self, url, data = None): auth = encodestring('%s:%s' % (self.user, self.secret)).replace('\n', '') if data is not None: # we have POST data if there is data values = urllib.urlencode(data) request = urllib2.Request(url, values) request.add_header(...
1,133,685
queries the database and returns the status of the item. args: status_item: the name of the item to check
def get(self, status_item): lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3])) lg.setLevel(self.log_level) sparql = value = self.conn.query(sparql=sparql.format(self.group, status_item)) if len(value) > 0 and \ cbool(value[0].get('loaded...
1,133,782
sets the status item to the passed in parameters args: status_item: the name of the item to set status: boolean value to set the item
def set(self, status_item, status): lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3])) lg.setLevel(self.log_level) sparql = return self.conn.query(sparql=sparql.format(self.group, status_item, ...
1,133,783
Iterates through a list of queries and runs them through the connection Args: ----- queries: list of strings or tuples containing (query_string, kwargs) conn: the triplestore connection to use
def run_query_series(queries, conn): results = [] for item in queries: qry = item kwargs = {} if isinstance(item, tuple): qry = item[0] kwargs = item[1] result = conn.update_query(qry, **kwargs) # pdb.set_trace() results.append(result)...
1,133,949
Returns all the triples for a specific graph args: graph: the URI of the graph to retrieve conn: the rdfframework triplestore connection
def get_graph(graph, conn, **kwargs): sparql = render_without_request("sparqlGraphDataTemplate.rq", prefix=NSM.prefix(), graph=graph) return conn.query(sparql, **kwargs)
1,133,951
Make the filter section for a query template args: filters: list of dictionaries to generate the filter example: filters = [{'variable': 'p', 'operator': '=', 'union_type': '||', 'values': ['rdf:type', 'rdfs:label']}]
def make_sparql_filter(filters): def make_filter_str(variable, operator, union_type, values): formated_vals = UniqueList() for val in values: try: formated_vals.append(val.sparql) except AttributeError: formated_vals.append(val) ...
1,133,952
Detects and opens compressed files Args: parser (ArgumentParser): parser used to generate values namespace (Namespace): namespace to set values for value (str): actual value specified by user option_string (str): argument flag used to call this function ...
def __call__(self, parser, namespace, value, option_string=None, **kwargs): handle = copen(value, mode=self.mode, **self.kwargs) setattr(namespace, self.dest, handle)
1,134,013
Converts an ill-conditioned correlation matrix into well-conditioned matrix with one common correlation coefficient Parameters: ----------- R : ndarray an ill-conditioned correlation matrix, e.g. oxyba.illcond_corrmat Return: ------- cmat : ndarray DxD matr...
def onepara(R): import numpy as np import warnings d = R.shape[0] if d < 2: raise Exception(( "More than one variable is required." "Supply at least a 2x2 matrix.")) # the explicit solution x = (np.sum(R) + np.trace(R)) / (d**2 - d) if x < (-1. / (d -...
1,134,317
Takes a list of Item objects and returns a list of Item objects with respective prices modified Uses the given list of item objects to formulate a query to the item database. Uses the returned results to populate each item in the list with its respective price, then returns the modified list. ...
def priceItems(items): retItems = [] sendItems = [] for item in items: sendItems.append(item.name) resp = CodexAPI.searchMany(sendItems) for respItem in resp: retItems[respItem['name']].price = respItem['price'] ...
1,134,477
Own "dumb" reimplementation of textwrap.wrap(). This is because calling .wrap() on bigger strings can take a LOT of processor power. And I mean like 8 seconds of 3GHz CPU just to wrap 20kB of text without spaces. Args: text (str): Text to wrap. columns (int): Wrap after `columns` chara...
def _wrap(text, columns=80): out = [] for cnt, char in enumerate(text): out.append(char) if (cnt + 1) % columns == 0: out.append("\n") return "".join(out)
1,134,481
checks to see if the server_core is running args: delay: will cycle till core is up. timeout: number of seconds to wait
def verify_server_core(timeout=120, start_delay=90): timestamp = time.time() last_check = time.time() + start_delay - 10 last_delay_notification = time.time() - 10 server_down = True return_val = False timeout += 1 # loop until the server is up or the timeout is reached while((time....
1,134,540
loads an rml mapping into memory args: rml_name(str): the name of the rml file
def load_rml(self, rml_name): conn = CFG.rml_tstore cache_path = os.path.join(CFG.CACHE_DATA_PATH, 'rml_files', rml_name) if not os.path.exists(cache_path): results = get_graph(NSM.uri(getattr(NSM.kdr, rml_name), False), conn) with...
1,134,543
loads the RDF/turtle application data to the triplestore args: reset(bool): True will delete the definition dataset and reload all of the datafiles.
def _load_data(self, reset=False): log = logging.getLogger("%s.%s" % (self.log_name, inspect.stack()[0][3])) log.setLevel(self.log_level) for attr, obj in self.datafile_obj.items(): if reset or obj['latest_mod'] > obj['last_json_mo...
1,134,546
Define a subcommand. Args: *args (str): Sequence of program arguments needed to run the command. directory (Optional[str]): Directory the command is run in. env_vars (Optional[dict]): Environment variable to feed to the subcommand.
def __init__(self, *args, **kwargs): self.command = list(args) self.directory = kwargs['directory'] if 'directory' in kwargs else None self.env_vars = kwargs['env_vars'] if 'env_vars' in kwargs else None
1,134,671
Get subcommand acting on a service. Subcommand will run in service directory and with the environment variables used to run the service itself. Args: *args: Arguments to run command (e.g. "redis-cli", "-n", "1") Returns: Subcommand object.
def subcommand(self, *args): return Subcommand(*args, directory=self.directory, env_vars=self.env_vars)
1,134,677
Forecasts how long a backlog will take to complete given the historical values provided. Arguments: throughputs(List[int]): Number of units completed per unit of time (stories per week, story points per month, etc.) backlog_size(int): Units in the backlog (stories, points, etc.) ...
def forecast(self, throughputs, backlog_size, num_simulations=10000, max_periods=10000, seed=None): self._check_throughputs(throughputs) results = [] if seed is not None: random.seed(seed) for i in range(0, num_simulations): simulated_backlog = backlog_...
1,134,796
Parse title from alternative location if not found where it should be. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: str: Book's title.
def _parse_alt_title(html_chunk): title = html_chunk.find( "input", {"src": "../images_buttons/objednat_off.gif"} ) assert title, "Can't find alternative title!" title = title[0] assert "title" in title.params, "Can't find alternative title source!" # title is stored as ...
1,134,825
Parse title/name of the book and URL of the book. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: tuple: (title, url), both as strings.
def _parse_title_url(html_chunk): title = html_chunk.find("div", {"class": "comment"}) if not title: return _parse_alt_title(html_chunk), None title = title[0].find("h2") if not title: return _parse_alt_title(html_chunk), None # look for the url of the book if present url...
1,134,826
Parse authors of the book. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: list: List of :class:`structures.Author` objects. Blank if no author \ found.
def _parse_authors(html_chunk): authors = html_chunk.match( ["div", {"class": "comment"}], "h3", "a", ) if not authors: return [] authors = map( lambda x: Author( # create Author objects x.getContent().strip(), ...
1,134,827
Parse format, number of pages and ISBN. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: tuple: (format, pages, isbn), all as string.
def _parse_format_pages_isbn(html_chunk): ppi = get_first_content( html_chunk.find("div", {"class": "price-overflow"}) ) if not ppi: return None, None, None # all information this function should parse are at one line ppi = filter(lambda x: x.strip(), ppi.split("<br />"))[0] ...
1,134,828
Parse price of the book. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: str/None: Price as string with currency or None if not found.
def _parse_price(html_chunk): price = get_first_content( html_chunk.find("div", {"class": "prices"}) ) if not price: return None # it is always in format Cena:\n150kč price = dhtmlparser.removeTags(price) price = price.split("\n")[-1] return price
1,134,829
Parse available information about the book from the book details page. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: obj: :class:`structures.Publication` instance with book details.
def _process_book(html_chunk): title, url = _parse_title_url(html_chunk) book_format, pages, isbn = _parse_format_pages_isbn(html_chunk) # required informations pub = Publication( title=title, authors=_parse_authors(html_chunk), price=_parse_price(html_chunk), publi...
1,134,830
The external landing. Also a convenience function for redirecting users who don't have site access to the external page. Parameters: request - the request in the calling function message - a message from the caller function
def red_ext(request, message=None): if message: messages.add_message(request, messages.ERROR, message) return HttpResponseRedirect(reverse('external'))
1,134,918
Convenience function for redirecting users who don't have access to a page to the home page. Parameters: request - the request in the calling function message - a message from the caller function
def red_home(request, message=None): if message: messages.add_message(request, messages.ERROR, message) return HttpResponseRedirect(reverse('homepage'))
1,134,919
Permet d'ajouter un fichier Args: file (string): path d'un fichier json Returns: type: None Raises: FileFormatException: Erreur du format de fichier
def addFile(self,file): mylambda= lambda adict : { key.upper() : mylambda(adict[key]) if isinstance(adict[key],dict) else adict[key] for key in adict.keys() } if file.endswith('.json') : with open(file, 'r') as f: fileContent = mylambda(json.load(f)) elif fi...
1,135,203
permet de récupérer une config Args: path (String): Nom d'une config Returns: type: String la valeur de la config ou None
def get(self,path): path = path.upper() if path in self._configCache: return self._configCache[path] else : return self._findConfig(path)
1,135,204
converts the results of a query to RdfDatatype instances args: data: a list of triples
def convert_results(data, **kwargs): if kwargs.get("multiprocessing", False): manager = SharedManager() manager.register("BaseRdfDataType", BaseRdfDataType) manager.register("Uri", Uri) data_l = len(data) group_size = data_l // pool_size if data_l % pool_size: ...
1,135,333
Update the parser object for the shell. Arguments: parser: An instance of argparse.ArgumentParser.
def update_parser(parser): def __stdin(s): if s is None: return None if s == '-': return sys.stdin return open(s, 'r', encoding = 'utf8') parser.add_argument('--root-prompt', metavar = 'STR', default = 'PlayBoy', help = 'th...
1,135,672
When called, get the environment updates and write updates to a CSV file and if a new config has been provided, write a new configuration file. Args: display_all_distributions (bool): Return distribution even if it is up-to-date. verbose (bool): If ``True...
def get_updates( self, display_all_distributions=False, verbose=False ): # pragma: no cover if verbose: logging.basicConfig( stream=sys.stdout, level=logging.INFO, format='%(message)s', ) lo...
1,135,775
Given a list of updates, write the updates out to the provided CSV file. Args: updates (list): List of Update objects.
def write_updates_to_csv(self, updates): with open(self._csv_file_name, 'w') as csvfile: csvwriter = self.csv_writer(csvfile) csvwriter.writerow(CSV_COLUMN_HEADERS) for update in updates: row = [ update.name, u...
1,135,777
Given a list of updates, write the updates out to the provided configuration file. Args: updates (list): List of Update objects.
def write_new_config(self, updates): with open(self._new_config, 'w') as config_file: for update in updates: line = '{0}=={1} # The installed version is: {2}\n'.format( update.name, update.new_version, update.curre...
1,135,778
Check all packages installed in the environment to see if there are any updates available. Args: display_all_distributions (bool): Return distribution even if it is up-to-date. Defaults to ``False``. Returns: list: A list of Update objects ordered based...
def _get_environment_updates(self, display_all_distributions=False): updates = [] for distribution in self.pip.get_installed_distributions(): versions = self.get_available_versions(distribution.project_name) max_version = max(versions.keys()) if versions else UNKNOW_NUM...
1,135,779
Query PyPI to see if package has any available versions. Args: project_name (str): The name the project on PyPI. Returns: dict: Where keys are tuples of parsed versions and values are the versions returned by PyPI.
def get_available_versions(self, project_name): available_versions = self.pypi_client.package_releases(project_name) if not available_versions: available_versions = self.pypi_client.package_releases( project_name.capitalize() ) # ``dict()`` for ...
1,135,780
Parse a version string. Args: version (str): A string representing a version e.g. '1.9rc2' Returns: tuple: major, minor, patch parts cast as integer and whether or not it was a pre-release version.
def _parse_version(version): parsed_version = parse_version(version) return tuple( int(dot_version) for dot_version in parsed_version.base_version.split('.') ) + (parsed_version.is_prerelease,)
1,135,781
Parses a json query string into its parts args: qry_str: query string params: variables passed into the string
def parse_json_qry(qry_str): def param_analyzer(param_list): rtn_list = [] for param in param_list: parts = param.strip().split("=") try: rtn_list.append(\ JsonQryProcessor[parts[0].strip().lower()](parts[1])) except I...
1,136,016
reads the parameter and returns the selected element args: dataset: the dataset to search param: the parameter to search by no_key: whether to use the 'param' 'element' to filter the list. This is passed True after the first run during recursive call when t...
def get_json_qry_item(dataset, param, no_key=False): def get_dataset_vals(ds, key, filter_tup=tuple()): def reduce_list(value): if isinstance(value, list): if len(value) == 1: return value[0] return value def merge_list(value): ...
1,136,017
reads the parameter and returns the selected element args: dataset: the dataset to search param: the parameter to search by no_key: whether to use the 'param' 'element' to filter the list. This is passed True after the first run during recursive call when t...
def get_reverse_json_qry_item(dataset, param, no_key=False, initial_val=None): def get_dataset_vals(ds, key, filter_tup=tuple(), initial_val=None): def reduce_list(value): if isinstance(value, list): if len(value) == 1: return value[0] return...
1,136,018
Takes a json query string and returns the results args: dataset: RdfDataset to query against qry_str: query string params: dictionary of params
def json_qry(dataset, qry_str, params={}): # if qry_str.startswith("$.bf_itemOf[rdf_type=bf_Print].='print',\n"): # pdb.set_trace() if not '$' in qry_str: qry_str = ".".join(['$', qry_str.strip()]) dallor_val = params.get("$", dataset) if isinstance(dallor_val, rdflib.URIRef): ...
1,136,019
Create equal sized slices of the file. The last slice may be larger than the others. args: * size (int): The full size to be sliced. * n (int): The number of slices to return. returns: A list of `FileSlice` objects of length (n).
def slices(self, size, n=3): if n <= 0: raise ValueError('n must be greater than 0') if size < n: raise ValueError('size argument cannot be less than n argument') slice_size = size // n last_slice_size = size - (n-1) * slice_size t = [self...
1,136,335
Create a file slice. args: * start (int): The beginning position of the slice. The slice will read/write to this position when it is seeked to 0. * size (int): The size of the slice.
def __call__(self, start, size):
    """Create a file slice.

    Args:
        start (int): Position in the underlying file that the slice
            treats as offset 0 (reads/writes land here after a seek to 0).
        size (int): The size of the slice in bytes.

    Returns:
        FileSlice: A view over ``self.f`` guarded by ``self.lock``.
    """
    slice_view = FileSlice(self.f, start, size, self.lock)
    return slice_view
1,136,336
Gets a MatchObject for the given key. Args: key (str): Key of the property to look-up. Return: MatchObject: The discovered match.
def _get_match(self, key): return self._get_string_match(key=key) or \ self._get_non_string_match(key=key)
1,136,594
Gets a MatchObject for the given key, assuming a non-string value. Args: key (str): Key of the property to look-up. Return: MatchObject: The discovered match.
def _get_non_string_match(self, key): expression = r'(?:\s*)'.join([ '^', 'define', r'\(', '\'{}\''.format(key), ',', r'(.*)', r'\)', ';' ]) pattern = re.compile(expression, re.MULTILINE) ...
1,136,595
Gets a MatchObject for the given key, assuming a string value. Args: key (str): Key of the property to look-up. Return: MatchObject: The discovered match.
def _get_string_match(self, key): expression = r'(?:\s*)'.join([ '^', 'define', r'\(', '\'{}\''.format(key), ',', r'\'(.*)\'', r'\)', ';' ]) pattern = re.compile(expression, re.MULTILINE) ...
1,136,596
Gets the value of the property in the given MatchObject. Args: key (str): Key of the property looked-up. match (MatchObject): The matched property. Return: The discovered value, as a string or boolean.
def _get_value_from_match(self, key, match): value = match.groups(1)[0] clean_value = str(value).lstrip().rstrip() if clean_value == 'true': self._log.info('Got value of "%s" as boolean true.', key) return True if clean_value == 'false': se...
1,136,597
Gets the value of the property of the given key. Args: key (str): Key of the property to look-up.
def get(self, key):
    """Get the value of the property of the given key.

    Args:
        key (str): Key of the property to look up.

    Returns:
        The parsed value of the matched property, or ``None`` when the
        key is not present in the loaded content.
    """
    found = self._get_match(key=key)
    if found:
        return self._get_value_from_match(key=key, match=found)
    return None
1,136,598
Updates the value of the given key in the loaded content. Args: key (str): Key of the property to update. value (str): New value of the property. Return: bool: Indicates whether or not a change was made.
def set(self, key, value): match = self._get_match(key=key) if not match: self._log.info('"%s" does not exist, so it will be added.', key) if isinstance(value, str): self._log.info('"%s" will be added as a PHP string value.', ...
1,136,599
Get a user object from the API. If no ``user_id`` or ``user_name`` is specified, it will return the User object for the currently authenticated user. Args: user_id (int): User ID of the user for whom you want to get information. [Optional] user_name(str):...
def get_user(self, user_id=None, user_name=None): if user_id: endpoint = '/api/user_id/{0}'.format(user_id) elif user_name: endpoint = '/api/user_name/{0}'.format(user_name) else: # Return currently authorized user endpoint = '/api/user' ...
1,136,734
Returns a SharedFile object given by the sharekey. Args: sharekey (str): Sharekey of the SharedFile you want to retrieve. Returns: SharedFile
def get_shared_file(self, sharekey=None):
    """Return a SharedFile object given by the sharekey.

    Args:
        sharekey (str): Sharekey of the SharedFile you want to retrieve.

    Returns:
        SharedFile

    Raises:
        Exception: If no sharekey is supplied.
    """
    if sharekey:
        data = self._make_request('GET', '/api/sharedfile/{0}'.format(sharekey))
        return SharedFile.NewFromJSON(data)
    raise Exception("You must specify a sharekey.")
1,136,737
'Like' a SharedFile. mlkshk doesn't allow you to unlike a sharedfile, so this is ~~permanent~~. Args: sharekey (str): Sharekey for the file you want to 'like'. Returns: Either a SharedFile on success, or an exception on error.
def like_shared_file(self, sharekey=None): if not sharekey: raise Exception( "You must specify a sharekey of the file you" "want to 'like'.") endpoint = '/api/sharedfile/{sharekey}/like'.format(sharekey=sharekey) data = self._make_request("P...
1,136,738
Save a SharedFile to your Shake. Args: sharekey (str): Sharekey for the file to save. Returns: SharedFile saved to your shake.
def save_shared_file(self, sharekey=None): endpoint = '/api/sharedfile/{sharekey}/save'.format(sharekey=sharekey) data = self._make_request("POST", endpoint=endpoint, data=None) try: sf = SharedFile.NewFromJSON(data) sf.saved = True return sf ...
1,136,739
Retrieve comments on a SharedFile Args: sharekey (str): Sharekey for the file from which you want to return the set of comments. Returns: List of Comment objects.
def get_comments(self, sharekey=None): if not sharekey: raise Exception( "You must specify a sharekey of the file you" "want to 'like'.") endpoint = '/api/sharedfile/{0}/comments'.format(sharekey) data = self._make_request("GET", endpoint=en...
1,136,742
Post a comment on behalf of the current user to the SharedFile with the given sharekey. Args: sharekey (str): Sharekey of the SharedFile to which you'd like to post a comment. comment (str): Text of the comment to post. Returns: Comment objec...
def post_comment(self, sharekey=None, comment=None):
    """Post a comment on behalf of the current user to the SharedFile
    with the given sharekey.

    Args:
        sharekey (str): Sharekey of the SharedFile to comment on.
        comment (str): Text of the comment to post.

    Returns:
        Comment object built from the API response.
    """
    target = '/api/sharedfile/{0}/comments'.format(sharekey)
    payload = {'body': comment}
    response = self._make_request("POST", endpoint=target, data=payload)
    return Comment.NewFromJSON(response)
1,136,743
Update the editable details (just the title and description) of a SharedFile. Args: sharekey (str): Sharekey of the SharedFile to update. title (Optional[str]): Title of the SharedFile. description (Optional[str]): Description of the SharedFile Returns: ...
def update_shared_file(self, sharekey=None, title=None, description=None): if not sharekey: raise Exception( "You must specify a sharekey for the sharedfile" "you wish to update....
1,136,745
Returns the user shop the indexed item is in Parameters: index (int) -- The item index Returns UserShopFront - User shop the item is in
def shop(self, index):
    """Return the user shop the indexed item is in.

    Args:
        index (int): The item index.

    Returns:
        UserShopFront: The user shop the item is in.
    """
    # Bug fix: the original referenced an undefined local `item`,
    # raising NameError on every call. Look the item up the same way
    # the sibling buy() method does.
    item = self.items[index]
    return UserShopFront(self.usr, item.owner, item.id, str(item.price))
1,136,861
Attempts to buy indexed item, returns result Parameters: index (int) -- The item index Returns bool - True if item was bought, false otherwise
def buy(self, index): item = self.items[index] us = UserShopFront(self.usr, item.owner, item.id, str(item.price)) us.load() if not item.name in us.inventory: return False if not us.inventory[item.name].buy(): return False ...
1,136,862
domain: seattle, bothell, tacoma, pce_ap, pce_ol, pce_ielp, pce (case insensitive) args: year (required) term_name (required): Winter|Spring|Summer|Autumn curriculum_abbreviation course_number section_id student_id (student number) instructor_id (employee identi...
def search_evaluations(domain, **kwargs):
    """Search IAS course evaluations in the given domain.

    Args:
        domain (str): seattle, bothell, tacoma, pce_ap, pce_ol,
            pce_ielp, pce (case insensitive).
        **kwargs: Search filters such as year, term_name,
            curriculum_abbreviation, course_number, section_id,
            student_id (student number), instructor_id (employee id).

    Returns:
        The evaluations parsed from the IAS response.
    """
    query_string = urlencode(kwargs)
    resource_url = "{}?{}".format(IAS_PREFIX, query_string)
    response_data = get_resource(resource_url, domain)
    return _json_to_evaluation(response_data)
1,136,873
Takes a value and converts it to an elasticsearch representation args: value: the value to convert ranges: the list of ranges method: conversion method to use 'None': default -> converts the value to its json value 'missing_obj': adds attributes as if the va...
def convert_value_to_es(value, ranges, obj, method=None): def sub_convert(val): if isinstance(val, BaseRdfDataType): return val.to_json elif isinstance(value, __MODULE__.rdfclass.RdfClassBase): return val.subject.sparql_uri return val if method == ...
1,136,923
Returns the elasticsearch index types for the obj args: rng_def: the range definition dictionary ranges: rdfproperty ranges
def get_idx_types(rng_def, ranges): idx_types = rng_def.get('kds_esIndexType', []).copy() if not idx_types: nested = False for rng in ranges: if range_is_obj(rng, __MODULE__.rdfclass): nested = True if nested: idx_types.append('es_Nested') ...
1,136,924
Filters the range definitions based on the bound class args: obj: the rdfproperty instance
def get_prop_range_defs(class_names, def_list): try: cls_options = set(class_names + ['kdr_AllClasses']) return [rng_def for rng_def in def_list \ if not isinstance(rng_def, BlankNode) \ and cls_options.difference(\ set(rng_def.get('kds_a...
1,136,925
Returns the value for an object that goes into the elasticsearch 'value' field args: obj: data object to update def_obj: the class instance that has definition values
def get_es_value(obj, def_obj): def get_dict_val(item): if isinstance(item, dict): return str(item.get('value')) return str(item) value_flds = [] if def_obj.es_defs.get('kds_esValue'): value_flds = def_obj.es_defs['kds_esValue'].copy() else: # p...
1,136,927
Returns object with label for an object that goes into the elasticsearch 'label' field args: obj: data object to update def_obj: the class instance that has definition values
def get_es_label(obj, def_obj): label_flds = LABEL_FIELDS if def_obj.es_defs.get('kds_esLabel'): label_flds = def_obj.es_defs['kds_esLabel'] + LABEL_FIELDS try: for label in label_flds: if def_obj.cls_defs.get(label): obj['label'] = def_obj.cls_defs[label][0...
1,136,928