docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Send html document to user. Args: - data: Dict to render template, or string with rendered HTML. - template: Name of template to render HTML document with passed data.
def html(self, data=None, template=None): if data is None: data = {} if template: return render(self.request, template, data) return HttpResponse(data)
1,145,713
Try to look for meta tag in given `dom`. Args: dom (obj): pyDHTMLParser dom of HTML elements. default (default "utr-8"): What to use if encoding is not found in `dom`. Returns: str/default: Given encoding or `default` parameter if not found.
def _get_encoding(dom, default="utf-8"): encoding = dom.find("meta", {"http-equiv": "Content-Type"}) if not encoding: return default encoding = encoding[0].params.get("content", None) if not encoding: return default return encoding.lower().split("=")[-1]
1,145,939
Look for encoding in given `html`. Try to convert `html` to utf-8. Args: html (str): HTML code as string. Returns: str: HTML code encoded in UTF.
def handle_encodnig(html): encoding = _get_encoding( dhtmlparser.parseString( html.split("</head>")[0] ) ) if encoding == "utf-8": return html return html.decode(encoding).encode("utf-8")
1,145,940
Check is `element` object match rest of the parameters. All checks are performed only if proper attribute is set in the HTMLElement. Args: element (obj): HTMLElement instance. tag_name (str): Tag name. params (dict): Parameters of the tag. content (str): Content of the tag. ...
def is_equal_tag(element, tag_name, params, content): if tag_name and tag_name != element.getTagName(): return False if params and not element.containsParamSubset(params): return False if content is not None and content.strip() != element.getContent().strip(): return False ...
1,145,941
Create a ConsoleWidget. Parameters: ----------- parent : QWidget, optional [default None] The parent for this widget.
def __init__(self, parent=None, **kw): QtGui.QWidget.__init__(self, parent) LoggingConfigurable.__init__(self, **kw) # While scrolling the pager on Mac OS X, it tears badly. The # NativeGesture is platform and perhaps build-specific hence # we take adequate precautions...
1,146,080
Clear the console. Parameters: ----------- keep_input : bool, optional (default True) If set, restores the old input buffer if a new prompt is written.
def clear(self, keep_input=True): if self._executing: self._control.clear() else: if keep_input: input_buffer = self.input_buffer self._control.clear() self._show_prompt() if keep_input: self.input_buffe...
1,146,085
Paste the contents of the clipboard into the input region. Parameters: ----------- mode : QClipboard::Mode, optional [default QClipboard::Clipboard] Controls which part of the system clipboard is used. This can be used to access the selection clipboard in X11 and the Fi...
def paste(self, mode=QtGui.QClipboard.Clipboard): if self._control.textInteractionFlags() & QtCore.Qt.TextEditable: # Make sure the paste is safe. self._keep_cursor_in_buffer() cursor = self._control.textCursor() # Remove any trailing newline, which conf...
1,146,091
Given a KeyboardModifiers flags object, return whether the Control key is down. Parameters: ----------- include_command : bool, optional (default True) Whether to treat the Command key as a (mutually exclusive) synonym for Control when in Mac OS.
def _control_key_down(self, modifiers, include_command=False): # Note that on Mac OS, ControlModifier corresponds to the Command key # while MetaModifier corresponds to the Control key. if sys.platform == 'darwin': down = include_command and (modifiers & QtCore.Qt.ControlMod...
1,146,105
Displays text using the pager if it exceeds the height of the viewport. Parameters: ----------- html : bool, optional (default False) If set, the text will be interpreted as HTML instead of plain text.
def _page(self, text, html=False): line_height = QtGui.QFontMetrics(self.font).height() minlines = self._control.viewport().height() / line_height if self.paging != 'none' and \ re.match("(?:[^\n]*\n){%i}" % minlines, text): if self.paging == 'custom': ...
1,146,128
Return last <td> found in `el` DOM. Args: el (obj): :class:`dhtmlparser.HTMLElement` instance. Returns: obj: HTMLElement instance if found, or None if there are no <td> tags.
def _get_last_td(el): if not el: return None if type(el) in [list, tuple, set]: el = el[0] last = el.find("td") if not last: return None return last[-1]
1,146,276
Get <tr> tag with given `ID` and return content of the last <td> tag from <tr> root. Args: details (obj): :class:`dhtmlparser.HTMLElement` instance. ID (str): id property of the <tr> tag. Returns: str: Content of the last <td> as strign.
def _get_td_or_none(details, ID): content = details.find("tr", {"id": ID}) content = _get_last_td(content) # if content is None, return it if not content: return None content = content.getContent().strip() # if content is blank string, return None if not content: retu...
1,146,277
Parse title/name of the book. Args: dom (obj): HTMLElement containing whole HTML page. details (obj): HTMLElement containing slice of the page with details. Returns: str: Book's title. Raises: AssertionError: If title not found.
def _parse_title(dom, details): title = details.find("h1") # if the header is missing, try to parse title from the <title> tag if not title: title = dom.find("title") assert title, "Can't find <title> tag!" return title[0].getContent().split("|")[0].strip() return title[0...
1,146,278
Parse authors of the book. Args: details (obj): HTMLElement containing slice of the page with details. Returns: list: List of :class:`structures.Author` objects. Blank if no author \ found.
def _parse_authors(details): authors = details.find( "tr", {"id": "ctl00_ContentPlaceHolder1_tblRowAutor"} ) if not authors: return [] # book with unspecified authors # parse authors from HTML and convert them to Author objects author_list = [] for author in autho...
1,146,279
Parse publisher of the book. Args: details (obj): HTMLElement containing slice of the page with details. Returns: str/None: Publisher's name as string or None if not found.
def _parse_publisher(details): publisher = _get_td_or_none( details, "ctl00_ContentPlaceHolder1_tblRowNakladatel" ) # publisher is not specified if not publisher: return None publisher = dhtmlparser.removeTags(publisher).strip() # return None instead of blank stri...
1,146,280
Parse number of pages and binding of the book. Args: details (obj): HTMLElement containing slice of the page with details. Returns: (pages, binding): Tuple with two string or two None.
def _parse_pages_binding(details): pages = _get_td_or_none( details, "ctl00_ContentPlaceHolder1_tblRowRozsahVazba" ) if not pages: return None, None binding = None # binding info and number of pages is stored in same string if "/" in pages: binding = pages.spl...
1,146,281
Parse ISBN and EAN. Args: details (obj): HTMLElement containing slice of the page with details. Returns: (ISBN, EAN): Tuple with two string or two None.
def _parse_ISBN_EAN(details): isbn_ean = _get_td_or_none( details, "ctl00_ContentPlaceHolder1_tblRowIsbnEan" ) if not isbn_ean: return None, None ean = None isbn = None if "/" in isbn_ean: # ISBN and EAN are stored in same string isbn, ean = isbn_ean.split...
1,146,282
Parse description of the book. Args: details (obj): HTMLElement containing slice of the page with details. Returns: str/None: Details as string with currency or None if not found.
def _parse_description(details): description = details.find("div", {"class": "detailPopis"}) # description not found if not description: return None # remove links to ebook version ekniha = description[0].find("div", {"class": "ekniha"}) if ekniha: ekniha[0].replaceWith(dh...
1,146,283
Parse available informations about book from the book details page. Args: book_url (str): Absolute URL of the book. Returns: obj: :class:`structures.Publication` instance with book details.
def _process_book(book_url): data = DOWNER.download(book_url) dom = dhtmlparser.parseString(data) details_tags = dom.find("div", {"id": "contentDetail"}) assert details_tags, "Can't find details of the book." details = details_tags[0] # parse required informations title = _parse_tit...
1,146,284
Convert subpage & subitem to a integer * if page == 1, then return 0, since the item count is the true # of items * if page == 2, then return, page-1 * items_per_page, since we are returning the # of items on a full page. Args: * None Returns: * Integer - Which represents the nu...
def pi_to_number(self, page=1, item=1): if page > 1: return ((page - 1) * self.page_items) + item else: return 0 + item
1,146,334
Convert subpage & subitem to a integer * if page == 1, then return 0, since the item count is the true # of items * if page == 2, then return, page-1 * items_per_page, since we are returning the # of items on a full page. Args: * None Returns: * Integer - Which represents the nu...
def sub_pi_to_number(self, subpage=1, subitem=1): if subitem == None: subitem = 0 if subpage == None: return 0 else: if subpage > 1: return ((subpage - 1) * self.subpage_items) + subitem else: return 0 + su...
1,146,335
Convert subpage & subitem to a integer * if page == 1, then return 0, since the item count is the true # of items * if page == 2, then return, page-1 * items_per_page, since we are returning the # of items on a full page. Args: * None Returns: * Integer - Which represents the nu...
def current_spi_to_number(self): if self.slots['subpage'] == None: return self.sub_pi_to_number(0, 0) else: return self.sub_pi_to_number(self.slots['subpage'], self.slots['subitem'])
1,146,336
Return the number of items on page. Args: * page = The Page to test for * total_items = the total item count Returns: * Integer - Which represents the calculated number of items on page.
def return_item_count_on_page(self, page=1, total_items=1): up_to_page = ((page - 1) * self.page_items) # Number of items up to the page in question if total_items > up_to_page: # Remove all the items up to the page in question # count = total_it...
1,146,345
Return the number of items on page. Args: * page = The Page to test for * total_items = the total item count Returns: * Integer - Which represents the calculated number of items on page.
def return_item_count_on_subpage(self, subpage=1, total_items=1): up_to_subpage = ((subpage - 1) * self.subpage_items) # Number of items up to the page in question if total_items > up_to_subpage: # Remove all the items up to the page in question # ...
1,146,346
Set a value by key. Arguments: cache: instance of Cache key: 'user:342:username',
def set_value(cache, key, value): with cache as redis_connection: return redis_connection.set(key, value)
1,146,523
Get data from the .dat files args: inputfile: file Input File close: bool, default=False Closes inputfile if True inputfile (File): Input file close (boolean): Closes inputfile if True (default: False) returns: dictionary: data: list o...
def getdata(inputfile, argnum=None, close=False): # get data and converts them to list # outputtype - list, dict, all output = [] add_data = {} line_num = 0 for line in inputfile: line_num += 1 if ("#" not in line) and (line != ""): linesplit = line.split() ...
1,146,587
Helper function for parse.getdata. Remove empty variables, convert strings to float args: inputlist: list List of Variables Returns: ouput: Cleaned list
def cleandata(inputlist): output = [] for e in inputlist: new = [] for f in e: if f == "--": new.append(None) else: new.append(float(f)) output.append(new) return output
1,146,588
Compute the quadratic estimate of the centroid in a 2d-array. Args: data (2darray): two dimensional data array Returns center (tuple): centroid estimate on the row and column directions, respectively
def quadratic_2d(data): arg_data_max = np.argmax(data) i, j = np.unravel_index(arg_data_max, data.shape) z_ = data[i-1:i+2, j-1:j+2] # our quadratic function is defined as # f(x, y | a, b, c, d, e, f) := a + b * x + c * y + d * x^2 + e * xy + f * y^2 # therefore, the best fit coeffiecients ...
1,146,675
Create zone records. Arguments: server: TonicDNS API server token: TonicDNS API authentication token domain: Specify domain name identifier: Template ID dtype: MASTER|SLAVE|NATIVE (default: MASTER) master: master server ip address when dtype is...
def create_zone(server, token, domain, identifier, dtype, master=None): method = 'PUT' uri = 'https://' + server + '/zone' obj = JSONConverter(domain) obj.generate_zone(domain, identifier, dtype, master) connect.tonicdns_client(uri, method, token, obj.zone)
1,146,881
Create records of specific domain. Arguments: server: TonicDNS API server token: TonicDNS API authentication token domain: Specify domain name data: Create records ContentType: application/json x-authentication-token: token
def create_records(server, token, domain, data): method = 'PUT' uri = 'https://' + server + '/zone/' + domain for i in data: connect.tonicdns_client(uri, method, token, i)
1,146,882
Delete records of specific domain. Arguments: server: TonicDNS API server token: TonicDNS API authentication token data: Delete records ContentType: application/json x-authentication-token: token
def delete_records(server, token, data): method = 'DELETE' uri = 'https://' + server + '/zone' for i in data: connect.tonicdns_client(uri, method, token, i)
1,146,883
Look for negihbours of the `element`, return proper :class:`PathCall`. Args: element (obj): HTMLElement instance of the object you are looking for. Returns: list: List of :class:`PathCall` instances.
def neighbours_pattern(element): # check if there are any neighbours if not element.parent: return [] parent = element.parent # filter only visible tags/neighbours neighbours = filter( lambda x: x.isTag() and not x.isEndTag() or x.getContent().strip() \ or x ...
1,147,311
Look for `element` by its predecesors. Args: element (obj): HTMLElement instance of the object you are looking for. root (obj): Root of the `DOM`. Returns: list: ``[PathCall()]`` - list with one :class:`PathCall` object (to \ allow use with ``.extend(predecesors_pattern()...
def predecesors_pattern(element, root): def is_root_container(el): return el.parent.parent.getTagName() == "" if not element.parent or not element.parent.parent or \ is_root_container(element): return [] trail = [ [ element.parent.parent.getTagName(), ...
1,147,312
Give the server information about this node Arguments: node -- node_name or token for the node this data belongs to key -- identifiable key, that you use later to retrieve that piece of data kwargs -- the data you need to store
def post(node_name, key, **kwargs): node = nago.core.get_node(node_name) if not node: raise ValueError("Node named %s not found" % node_name) token = node.token node_data[token] = node_data[token] or {} node_data[token][key] = kwargs return "thanks!"
1,147,409
Send our information to a remote nago instance Arguments: node -- node_name or token for the node this data belongs to
def send(node_name): my_data = nago.core.get_my_info() if not node_name: node_name = nago.settings.get('server') node = nago.core.get_node(node_name) json_params = {} json_params['node_name'] = node_name json_params['key'] = "node_info" for k, v in my_data.items(): nago....
1,147,410
Returns bestfit_function args: bestfit_x: scalar, array_like x value return: scalar, array_like bestfit y value
def bestfit_func(self, bestfit_x): if not self.done_bestfit: raise KeyError("Do do_bestfit first") bestfit_y = self.fit_args[1] * (bestfit_x ** self.fit_args[0]) return bestfit_y
1,147,584
Only dispatch if the event does not correspond to an ignored file. Args: event (watchdog.events.FileSystemEvent)
def dispatch(self, event): if event.is_directory: return paths = [] if has_attribute(event, 'dest_path'): paths.append(os.path.realpath( unicode_paths.decode(event.dest_path))) if event.src_path: paths.append(os.path.realpath( ...
1,147,625
Return content of the first element in `el_list` or `alt`. Also return `alt` if the content string of first element is blank. Args: el_list (list): List of HTMLElement objects. alt (default None): Value returner when list or content is blank. strip (bool, default True): Call .strip() to...
def get_first_content(el_list, alt=None, strip=True): if not el_list: return alt content = el_list[0].getContent() if strip: content = content.strip() if not content: return alt return content
1,147,726
Test whether `url` is absolute url (``http://domain.tld/something``) or relative (``../something``). Args: url (str): Tested string. protocol (str, default "http"): Protocol which will be seek at the beginning of the `url`. Returns: bool: True if url is absolute, F...
def is_absolute_url(url, protocol="http"): if ":" not in url: return False protocol, rest = url.split(":", 1) if protocol.startswith(protocol) and rest.startswith("//"): return True return False
1,147,727
Normalize the `url` - from relative, create absolute URL. Args: base_url (str): Domain with ``protocol://`` string rel_url (str): Relative or absolute url. Returns: str/None: Normalized URL or None if `url` is blank.
def normalize_url(base_url, rel_url): if not rel_url: return None if not is_absolute_url(rel_url): rel_url = rel_url.replace("../", "/") if (not base_url.endswith("/")) and (not rel_url.startswith("/")): return base_url + "/" + rel_url.replace("../", "/") retu...
1,147,728
Generate function, which checks whether the content of the tag matchs `tag_content`. Args: tag_content (str): Content of the tag which will be matched thru whole DOM. content_transformer (fn, default None): Function used to transform all ta...
def content_matchs(tag_content, content_transformer=None): def content_matchs_closure(element): if not element.isTag(): return False cont = element.getContent() if content_transformer: cont = content_transformer(cont) return tag_content == cont ret...
1,147,731
Send `post_dict` to the :attr:`.ALEPH_EXPORT_URL`. Args: post_dict (dict): dictionary from :class:`PostData.get_POST_data()` Returns: str: Reponse from webform.
def _sendPostDict(post_dict): downer = Downloader() downer.headers["Referer"] = settings.EDEPOSIT_EXPORT_REFERER data = downer.download(settings.ALEPH_EXPORT_URL, post=post_dict) rheaders = downer.response_headers error_msg = rheaders.get("aleph-info", "").lower().strip() if "aleph-info" i...
1,147,751
Gets basic information from a binary stream to allow correct processing of the attribute header. This function allows the interpretation of the Attribute type, attribute length and if the attribute is non resident. Args: binary_view (memoryview of bytearray) - A binary stream with the ...
def get_attr_info(binary_view): global _ATTR_BASIC attr_type, attr_len, non_resident = _ATTR_BASIC.unpack(binary_view[:9]) return (AttrTypes(attr_type), attr_len, bool(non_resident))
1,148,032
Changes the time zones of all timestamps. Receives a new timezone and applies to all timestamps, if necessary. Args: timezone (:obj:`tzinfo`): Time zone to be applied Returns: A new ``Timestamps`` object if the time zone changes, otherwise returns ``self``.
def _astimezone_ts(self, timezone): if self.created.tzinfo is timezone: return self else: nw_obj = Timestamps((None,)*4) nw_obj.created = self.created.astimezone(timezone) nw_obj.changed = self.changed.astimezone(timezone) nw_obj.mft_changed = self.mft_changed.astime...
1,148,035
Checks if a particular index is allocated. Args: entry_number (int): Index to verify Returns: bool: True if it is allocated, False otherwise.
def _entry_allocated_bitmap(self, entry_number): index, offset = divmod(entry_number, 8) return bool(self._bitmap[index] & (1 << offset))
1,148,048
Creates a new object DataRuns from a binary stream. The binary stream can be represented by a byte string, bytearray or a memoryview of the bytearray. Args: binary_view (memoryview of bytearray) - A binary stream with the information of the attribute Returns...
def create_from_binary(cls, binary_view): nw_obj = cls() offset = 0 previous_dr_offset = 0 header_size = cls._INFO.size #"header" of a data run is always a byte while binary_view[offset] != 0: #the runlist ends with an 0 as the "header" header = cls._INFO....
1,148,066
Creates a new object AttributeHeader from a binary stream. The binary stream can be represented by a byte string, bytearray or a memoryview of the bytearray. Args: binary_view (memoryview of bytearray) - A binary stream with the information of the attribute ...
def create_from_binary(cls, binary_view): attr_type, attr_len, non_resident, name_len, name_offset, flags, attr_id, \ content_len, content_offset, indexed_flag = cls._REPR.unpack(binary_view[:cls._REPR.size]) if name_len: name = binary_view[name_offset:name_offset+(2*name_l...
1,148,069
Returns the path for the pdf file args: pdffilename: string returns path for the plots folder / pdffilename.pdf
def get_pdffilepath(pdffilename): return FILEPATHSTR.format( root_dir=ROOT_DIR, os_sep=os.sep, os_extsep=os.extsep, name=pdffilename, folder=PURPOSE.get("plots").get("folder", "plots"), ext=PURPOSE.get("plots").get("extension", "pdf") )
1,148,156
Do make_tex_table and pass all arguments args: inputlist: list outputfilename: string fmt: dictionary key: integer column index starting with 0 values: string format string. eg "{:g}" **kwarg...
def make_tex_table(inputlist, outputfilename, fmt=None, **kwargs): outputfilepath = FILEPATHSTR.format( root_dir=ROOT_DIR, os_sep=os.sep, os_extsep=os.extsep, name=outputfilename, folder=PURPOSE.get("tables").get("folder", "tables"), ext=PURPOSE.get("tabl...
1,148,157
reads an elasticsearh mapping dictionary and returns a list of fields cojoined with a dot notation args: obj: the dictionary to parse parent: name for a parent key. used with a recursive call
def mapping_fields(mapping, parent=[]): rtn_obj = {} for key, value in mapping.items(): new_key = parent + [key] new_key = ".".join(new_key) rtn_obj.update({new_key: value.get('type')}) if value.get('properties'): rtn_obj.update(mapping_fields(value['prop...
1,148,159
reads a dictionary and returns a list of fields cojoined with a dot notation args: obj: the dictionary to parse parent: name for a parent key. used with a recursive call
def dict_fields(obj, parent=[]): rtn_obj = {} for key, value in obj.items(): new_key = parent + [key] new_key = ".".join(new_key) if isinstance(value, list): if value: value = value[0] if isinstance(value, dict): rtn_obj.upda...
1,148,160
Returns all the rdfclasses that have and associated elasticsearch mapping Args: None
def list_mapped_classes(): cls_dict = {key: value for key, value in MODULE.rdfclass.__dict__.items() if not isinstance(value, RdfConfigManager) and key not in ['properties'] and hasattr(value, 'es_defs') ...
1,148,162
Returns a dictionary with the key as the es_index name and the object is a list of rdfclasses for that index args: None
def list_indexes(cls): cls_list = cls.list_mapped_classes() rtn_obj = {} for key, value in cls_list.items(): idx = value.es_defs.get('kds_esIndex')[0] try: rtn_obj[idx].append(value) except KeyError: rtn_obj[...
1,148,163
Returns an elasticsearch mapping for the specified index based off of the mapping defined by rdf class definitions args: idx_obj: Dictionary of the index and a list of rdfclasses included in the mapping
def get_rdf_es_idx_map(cls, idx_obj): idx_name = list(idx_obj)[0] es_map = { "index": idx_name, "body" : { "mappings": {}, "settings": { # "read_only_allow_delete": False, "index": { ...
1,148,164
sends the mapping to elasticsearch args: es_map: dictionary of the index mapping kwargs: reset_idx: WARNING! If True the current referenced es index will be deleted destroying all data in that index in elasticsearch. if False an i...
def send_es_mapping(self, es_map, **kwargs): log.setLevel(kwargs.get('log_level', self.log_level)) def next_es_index_version(curr_alias): try: alias_def = self.es.indices.get_alias(alias) except es_except.NotFoundError: ...
1,148,165
Retruns a dictionary of mappings and the fiels names in dot notation args: mappings: es mapping defitions to parse
def mapping_ref(self, es_mappings): new_map = {} for key, value in es_mappings.items(): for sub_key, sub_value in value.items(): new_map["/".join([key, sub_key])] = \ mapping_fields(sub_value['properties']) return new_map
1,148,168
Validate value. Args: value: Returns: A validated value. Raises: UnitError
def validate(self, value): for validate in self.validates: value = validate(value) return value
1,148,312
Fetch a ref. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref The ref to fetch, e.g., ``heads/my-feature-branch...
def get_ref(profile, ref): resource = "/refs/" + ref data = api.get_request(profile, resource) return prepare(data)
1,148,350
Create a ref. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref The ref to create, e.g., ``heads/my-feature-bran...
def create_ref(profile, ref, sha): resource = "/refs" payload = {"ref": "refs/" + ref, "sha": sha} data = api.post_request(profile, resource, payload) return prepare(data)
1,148,351
Point a ref to a new SHA. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref The ref to update, e.g., ``heads/my-...
def update_ref(profile, ref, sha): resource = "/refs/" + ref payload = {"sha": sha} data = api.patch_request(profile, resource, payload) return prepare(data)
1,148,352
Filter :class:`.Publication` objects using settings declared in :mod:`~harvester.settings` submodule. Args: publication (obj): :class:`.Publication` instance. Returns: obj/None: None if the publication was found in Aleph or `publication` \ if not.
def filter_publication(publication): if settings.USE_DUP_FILTER: publication = dup_filter.filter_publication(publication) if publication and settings.USE_ALEPH_FILTER: publication = aleph_filter.filter_publication( publication, cmp_authors=settings.ALEPH_FILTER_BY_A...
1,148,430
tests to see if the directory is writable. If the directory does it can attempt to create it. If unable returns False args: directory: filepath to the directory kwargs: mkdir[bool]: create the directory if it does not exist returns
def is_writable_dir(directory, **kwargs): try: testfile = tempfile.TemporaryFile(dir = directory) testfile.close() except OSError as e: if e.errno == errno.EACCES: # 13 return False elif e.errno == errno.ENOENT: # 2 if kwargs.get('mkdir') ==...
1,148,471
Returns a list of files args: file_directory: a sting path to the file directory file_extensions: a list of file extensions to filter example ['xml', 'rdf']. If none include all files include_subfolders: as implied include_root: whether to include the root in ...
def list_files(file_directory, file_extensions=None, include_subfolders=True, include_root=True, root_dir=None): log = logging.getLogger("%s" % (inspect.stack()[0][3])) log.setLevel(__LOG_LEVEL__) rtn_list = [] if not root_dir...
1,148,472
Formats a namespace and ending value into a python friendly format args: namespace: RdfNamespace or tuple in the format of (prefix, uri,) value: end value to attach to the namespace
def pyuri_formatter(namespace, value): if namespace[0]: return "%s_%s" %(namespace[0], value) else: return "pyuri_%s_%s" % (base64.b64encode(bytes(namespace[1], "utf-8")).decode(), value)
1,148,596
Create a new tree. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. tree A list of blob objects (each with a path, ...
def create_tree(profile, tree): resource = "/trees" payload = {"tree": tree} data = api.post_request(profile, resource, payload) return prepare(data)
1,148,774
Converts datetime isoformat string to datetime (dt) object Args: :dt_str (str): input string in '2017-12-30T18:48:00.353Z' form or similar Returns: TYPE: datetime object
def convert_strtime_datetime(dt_str): dt, _, us = dt_str.partition(".") dt = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S") us = int(us.rstrip("Z"), 10) return dt + datetime.timedelta(microseconds=us)
1,148,906
Summary: Convert duration into component time units Args: :duration (datetime.timedelta): time duration to convert Returns: days, hours, minutes, seconds | TYPE: tuple (integers)
def convert_timedelta(duration): days, seconds = duration.days, duration.seconds hours = seconds // 3600 minutes = (seconds % 3600) // 60 seconds = (seconds % 60) return days, hours, minutes, seconds
1,148,907
Summary: convert timedelta objects to human readable output Args: :duration (datetime.timedelta): time duration to convert :return_iter (tuple): tuple containing time sequence Returns: days, hours, minutes, seconds | TYPE: tuple (integers), OR human readable, notated uni...
def convert_dt_time(duration, return_iter=False): try: days, hours, minutes, seconds = convert_timedelta(duration) if return_iter: return days, hours, minutes, seconds # string format conversions if days > 0: format_string = ( '{} day{}, {...
1,148,908
Summary: Retrieve local operating system environment characteristics Args: :user (str): USERNAME, only required when run on windows os Returns: TYPE: dict object containing key, value pairs describing os information
def get_os(detailed=False): try: os_type = platform.system() if os_type == 'Linux': os_detail = platform.uname() distribution = platform.linux_distribution() HOME = os.environ['HOME'] username = os.getenv('USER') elif os_type == 'Windows...
1,148,910
Summary: Parse, update local awscli config credentials Args: :user (str): USERNAME, only required when run on windows os Returns: TYPE: dict object containing key, value pairs describing os information
def awscli_defaults(os_type=None): try: if os_type is None: os_type = platform.system() if os_type == 'Linux': HOME = os.environ['HOME'] awscli_credentials = HOME + '/.aws/credentials' awscli_config = HOME + '/.aws/config' elif os_type =...
1,148,911
Summary: Creates local config from JSON seed template Args: :config_file (str): filesystem object containing json dict of config values :json_config_obj (json): data to be written to config_file :config_dirname (str): dir name containing config_file Returns: TYPE: bool,...
def config_init(config_file, json_config_obj, config_dirname=None): HOME = os.environ['HOME'] # client config dir if config_dirname: dir_path = HOME + '/' + config_dirname if not os.path.exists(dir_path): os.mkdir(dir_path) os.chmod(dir_path, 0o755) else: ...
1,148,912
Summary: exports object to block filesystem object Args: :dict_obj (dict): dictionary object :filename (str): name of file to be exported (optional) Returns: True | False Boolean export status
def export_json_object(dict_obj, filename=None): try: if filename: try: with open(filename, 'w') as handle: handle.write(json.dumps(dict_obj, indent=4, sort_keys=True)) logger.info( '%s: Wrote %s to local filesy...
1,148,913
Summary: Imports block filesystem object Args: :filename (str): block filesystem object Returns: dictionary obj (valid json file), file data object
def import_file_object(filename): try: handle = open(filename, 'r') file_obj = handle.read() dict_obj = json.loads(file_obj) except IOError as e: logger.critical( 'import_file_object: %s error opening %s' % (str(e), str(filename)) ) raise e e...
1,148,914
Summary: Validates baseline dict against suspect dict to ensure contain USERNAME k,v parameters. Args: baseline (dict): baseline json structure suspect (dict): json object validated against baseline structure Returns: Success (matches baseline) | Failure (no match), TYPE:...
def json_integrity(baseline, suspect): try: for k,v in baseline.items(): for ks, vs in suspect.items(): keys_baseline = set(v.keys()) keys_suspect = set(vs.keys()) intersect_keys = keys_baseline.intersection(keys_suspect) added...
1,148,915
Parses local config file for override values Args: :local_file (str): filename of local config file Returns: dict object of values contained in local config file
def read_local_config(cfg): try: if os.path.exists(cfg): config = import_file_object(cfg) return config else: logger.warning( '%s: local config file (%s) not found, cannot be read' % (inspect.stack()[0][3], str(cfg))) excep...
1,148,917
Withdraws given number of NPs from the shop till, returns result Parameters: nps (int) -- Number of NPs to withdraw Returns bool - True if successful, False otherwise
def grabTill(self, nps): if not int(nps): return False pg = self.usr.getPage("http://www.neopets.com/market.phtml?type=till") form = pg.form(action="process_market.phtml") form['amount'] = str(nps) form.usePin = True pg = form.su...
1,148,958
Get the list of committed signatures Args: vcs (easyci.vcs.base.Vcs) Returns: list(basestring) - list of signatures
def get_committed_signatures(vcs):
    """Get the list of committed signatures.

    Args:
        vcs (easyci.vcs.base.Vcs)

    Returns:
        list(basestring) - list of signatures; empty when no history exists
    """
    history_file = _get_committed_history_path(vcs)
    if not os.path.exists(history_file):
        return []
    # Signatures are stored whitespace-separated in one file.
    with open(history_file, 'r') as handle:
        return handle.read().split()
1,148,998
Get the list of staged signatures Args: vcs (easyci.vcs.base.Vcs) Returns: list(basestring) - list of signatures
def get_staged_signatures(vcs):
    """Get the list of staged signatures.

    Args:
        vcs (easyci.vcs.base.Vcs)

    Returns:
        list(basestring) - list of signatures; empty when no staging file exists
    """
    history_file = _get_staged_history_path(vcs)
    if not os.path.exists(history_file):
        return []
    # Signatures are stored whitespace-separated in one file.
    with open(history_file, 'r') as handle:
        return handle.read().split()
1,148,999
Add `signature` to the list of committed signatures The signature must already be staged Args: vcs (easyci.vcs.base.Vcs) user_config (dict) signature (basestring) Raises: NotStagedError AlreadyCommittedError
def commit_signature(vcs, user_config, signature): if signature not in get_staged_signatures(vcs): raise NotStagedError evidence_path = _get_committed_history_path(vcs) committed_signatures = get_committed_signatures(vcs) if signature in committed_signatures: raise AlreadyCommittedE...
1,149,000
Add `signature` to the list of staged signatures Args: vcs (easyci.vcs.base.Vcs) signature (basestring) Raises: AlreadyStagedError
def stage_signature(vcs, signature):
    """Add `signature` to the list of staged signatures.

    Args:
        vcs (easyci.vcs.base.Vcs)
        signature (basestring)

    Raises:
        AlreadyStagedError
    """
    evidence_path = _get_staged_history_path(vcs)
    current = get_staged_signatures(vcs)
    if signature in current:
        raise AlreadyStagedError
    current.append(signature)
    # Rewrite the whole staging file, one signature per line.
    with open(evidence_path, 'w') as handle:
        handle.write('\n'.join(current))
1,149,001
Remove `signature` from the list of staged signatures Args: vcs (easyci.vcs.base.Vcs) signature (basestring) Raises: NotStagedError
def unstage_signature(vcs, signature):
    """Remove `signature` from the list of staged signatures.

    Args:
        vcs (easyci.vcs.base.Vcs)
        signature (basestring)

    Raises:
        NotStagedError
    """
    evidence_path = _get_staged_history_path(vcs)
    current = get_staged_signatures(vcs)
    if signature not in current:
        raise NotStagedError
    current.remove(signature)
    # Rewrite the whole staging file, one signature per line.
    with open(evidence_path, 'w') as handle:
        handle.write('\n'.join(current))
1,149,002
Clear (committed) test run history from this project. Args: vcs (easyci.vcs.base.Vcs)
def clear_history(vcs):
    """Clear (committed) test run history from this project.

    Args:
        vcs (easyci.vcs.base.Vcs)
    """
    history_file = _get_committed_history_path(vcs)
    # Removing a non-existent file would raise, so check first.
    if os.path.exists(history_file):
        os.remove(history_file)
1,149,003
Decorator for restricting access to views according to a list of themes. Params: * ``theme`` - string or list of themes where decorated view must be * ``redirect_to`` - url or name of url pattern for redirect if CURRENT_THEME not in themes * ``raise_error`` - error class for raising...
def only_for(theme, redirect_to='/', raise_error=None): def check_theme(*args, **kwargs): if isinstance(theme, six.string_types): themes = (theme,) else: themes = theme if settings.CURRENT_THEME is None: return True result = settings.CURRENT...
1,149,053
Parses a text representation of a protocol message into a message. Args: lines: An iterable of lines of a message's text representation. message: A protocol buffer message to merge into. allow_unknown_extension: if True, skip over missing extensions and keep parsing allow_field_number: if True...
def ParseLines(lines, message, allow_unknown_extension=False,
               allow_field_number=False):
    """Parses a text representation of a protocol message into a message.

    Args:
        lines: An iterable of lines of a message's text representation.
        message: A protocol buffer message to merge into.
        allow_unknown_extension: if True, skip over missing extensions and
            keep parsing.
        allow_field_number: if True, field numbers are accepted.

    Returns:
        The result of the underlying parser's ParseLines call.
    """
    # Delegate all work to a freshly constructed parser.
    return _Parser(allow_unknown_extension, allow_field_number).ParseLines(
        lines, message)
1,149,056
Skips over a field value. Args: tokenizer: A tokenizer to parse the field name and values. Raises: ParseError: In case an invalid field value is found.
def _SkipFieldValue(tokenizer): # String/bytes tokens can come in multiple adjacent string literals. # If we can consume one, consume as many as we can. if tokenizer.TryConsumeByteString(): while tokenizer.TryConsumeByteString(): pass return if (not tokenizer.TryConsumeIdentifier() and n...
1,149,057
Parses an integer. Args: text: The text to parse. is_signed: True if a signed integer must be parsed. is_long: True if a long integer must be parsed. Returns: The integer value. Raises: ValueError: Thrown Iff the text is not a valid integer.
def ParseInteger(text, is_signed=False, is_long=False): # Do the actual parsing. Exception handling is propagated to caller. try: # We force 32-bit values to int and 64-bit values to long to make # alternate implementations where the distinction is more significant # (e.g. the C++ implementation) sim...
1,149,058
Convert protobuf message to text format. Args: message: The protocol buffers message.
def PrintMessage(self, message): fields = message.ListFields() if self.use_index_order: fields.sort(key=lambda x: x[0].index) for field, value in fields: if _IsMapEntry(field): for key in sorted(value): # This is slow for maps with submessage entires because it copies the ...
1,149,060
Print a single field value (not including name). For repeated fields, the value should be a single element. Args: field: The descriptor of the field to be printed. value: The value of the field.
def PrintFieldValue(self, field, value): out = self.out if self.pointy_brackets: openb = '<' closeb = '>' else: openb = '{' closeb = '}' if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: if self.as_one_line: out.write(' %s ' % openb) sel...
1,149,061
Converts a text representation of a protocol message into a message. Args: lines: Lines of a message's text representation. message: A protocol buffer message to merge into. Raises: ParseError: On text parsing problems.
def _ParseOrMerge(self, lines, message):
    """Converts a text representation of a protocol message into a message.

    Args:
        lines: Lines of a message's text representation.
        message: A protocol buffer message to merge into.

    Raises:
        ParseError: On text parsing problems.
    """
    tok = _Tokenizer(lines)
    # Consume fields until the tokenizer reports end of input.
    while not tok.AtEnd():
        self._MergeField(tok, message)
1,149,063
Merges a single scalar field into a message. Args: tokenizer: A tokenizer to parse the field value. message: The message of which field is a member. field: The descriptor of the field to be merged. Raises: ParseError: In case of text parsing problems.
def _MergeMessageField(self, tokenizer, message, field): is_map_entry = _IsMapEntry(field) if tokenizer.TryConsume('<'): end_token = '>' else: tokenizer.Consume('{') end_token = '}' if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: if field.is_extension: ...
1,149,064
Convert :class:`.MARCXMLRecord` object to :class:`.EPublication` namedtuple. Args: xml (str/MARCXMLRecord): MARC XML which will be converted to EPublication. In case of str, ``<record>`` tag is required. Returns: structure: :class:`.EPublication` namedtu...
def from_xml(xml): parsed = xml if not isinstance(xml, MARCXMLRecord): parsed = MARCXMLRecord(str(xml)) # check whether the document was deleted if "DEL" in parsed.datafields: raise DocumentNotFoundException("Document was deleted.") # i know, th...
1,149,244
Export the contents of the ConsoleWidget as XHTML with inline SVGs. Parameters: ----------- html : str, A utf-8 encoded Python string containing the Qt HTML to export. filename : str The file to be saved. image_tag : callable, optional (default None) Used to convert images...
def export_xhtml(html, filename, image_tag=None): if image_tag is None: image_tag = default_image_tag else: image_tag = ensure_utf8(image_tag) with open(filename, 'w') as f: # Hack to make xhtml header -- note that we are not doing any check for # valid XML. off...
1,149,301
Transforms a Qt-generated HTML string into a standards-compliant one. Parameters: ----------- html : str, A utf-8 encoded Python string containing the Qt HTML.
def fix_html(html): # A UTF-8 declaration is needed for proper rendering of some characters # (e.g., indented commands) when viewing exported HTML on a local system # (i.e., without seeing an encoding declaration in an HTTP header). # C.f. http://www.w3.org/International/O-charset for details. ...
1,149,303
Set a rules as object attribute. Arguments: name (string): Rule name to set as attribute name. properties (dict): Dictionnary of properties.
def set_rule(self, name, properties):
    """Set a rule as an object attribute.

    Arguments:
        name (string): Rule name to set as attribute name.
        properties (dict): Dictionary of properties.
    """
    # Remember the rule name so registered rules can be enumerated later.
    self._rule_attrs.append(name)
    setattr(self, name, properties)
1,149,308
Remove a rule from attributes. Arguments: name (string): Rule name to remove.
def remove_rule(self, name):
    """Remove a rule from attributes.

    Arguments:
        name (string): Rule name to remove.
    """
    # Drop the bookkeeping entry first, then the attribute itself.
    self._rule_attrs.remove(name)
    delattr(self, name)
1,149,309
Adds a list of file locations to the current list Args: file_locations: list of file location tuples
def add_file_locations(self, file_locations=None):
    """Adds a list of file locations to the current list.

    Args:
        file_locations: list of file location tuples; defaults to an
            empty list when omitted.
    """
    # Use None as the default instead of a mutable [] so the default
    # object is never shared across calls.
    if file_locations is None:
        file_locations = []
    if not hasattr(self, '__file_locations__'):
        # First call: store a shallow copy so this instance does not
        # alias the caller's list object.
        self.__file_locations__ = copy.copy(file_locations)
    else:
        # += extends the existing list with the new items; copying the
        # source first (as the old code did) is unnecessary.
        self.__file_locations__ += file_locations
1,149,559
Loads the file_locations into the triplestores args: file_locations: list of tuples to load [('vocabularies', [list of vocabs to load]) ('directory', '/directory/path') ('filepath', '/path/to/a/file') ('package_all',...
def load(self, file_locations=[], **kwargs): self.set_load_state(**kwargs) if file_locations: self.__file_locations__ += file_locations else: file_locations = self.__file_locations__ conn = self.__get_conn__(**kwargs) if file_locations: ...
1,149,563
sets the cache directory by testing write permissions for various locations args: directories: list of directories to test. First one with read-write permissions is selected.
def __set_cache_dir__(self, cache_dirs=[], **kwargs): # add a path for a subfolder 'vocabularies' log.setLevel(kwargs.get("log_level", self.log_level)) log.debug("setting cache_dir") test_dirs = cache_dirs try: test_dirs += [__CFG__.dirs.data] except ...
1,149,564
loads a file into the definition triplestore args: filepath: the path to the file
def load_file(self, filepath, **kwargs): log.setLevel(kwargs.get("log_level", self.log_level)) filename = os.path.split(filepath)[-1] if filename in self.loaded: if self.loaded_times.get(filename, datetime.datetime(2001,1,1)).timestamp() \ ...
1,149,565
updates the mod time for a file saved to the definition_store Args: filename: the name of the file
def __update_time__(self, filename, **kwargs): conn = self.__get_conn__(**kwargs) load_time = XsdDatetime(datetime.datetime.utcnow()) conn.update_query(.format(file=filename, ctime=load_time.sparql, graph="kdr:load_times"), ...
1,149,566
removes the passed in file from the connected triplestore args: filename: the filename to remove
def drop_file(self, filename, **kwargs): log.setLevel(kwargs.get("log_level", self.log_level)) conn = self.__get_conn__(**kwargs) result = conn.update_query("DROP GRAPH %s" % \ getattr(__NSM__.kdr, filename).sparql, *...
1,149,567