idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
47,700 | def _request_post_helper ( self , url , params = None ) : if self . api_key : query = { 'api_key' : self . api_key } return requests . post ( url , params = query , data = params , timeout = 60 ) | API POST helper |
47,701 | def _request_helper ( self , url , params , method ) : try : if method == 'POST' : return self . _request_post_helper ( url , params ) elif method == 'GET' : return self . _request_get_helper ( url , params ) raise VultrError ( 'Unsupported method %s' % method ) except requests . RequestException as ex : raise RuntimeError ( ex ) | API request helper method |
47,702 | def halt_running ( ) : vultr = Vultr ( API_KEY ) try : serverList = vultr . server . list ( ) except VultrError as ex : logging . error ( 'VultrError: %s' , ex ) for serverID in serverList : if serverList [ serverID ] [ 'power_status' ] == 'running' : logging . info ( serverList [ serverID ] [ 'label' ] + " will be gracefully shutdown." ) vultr . server . halt ( serverID ) | Halts all running servers |
47,703 | def tag_arxiv ( line ) : def tagger ( match ) : groups = match . groupdict ( ) if match . group ( 'suffix' ) : groups [ 'suffix' ] = ' ' + groups [ 'suffix' ] else : groups [ 'suffix' ] = '' return u'<cds.REPORTNUMBER>arXiv:%(year)s' u'%(month)s.%(num)s%(suffix)s' u'</cds.REPORTNUMBER>' % groups line = re_arxiv_5digits . sub ( tagger , line ) line = re_arxiv . sub ( tagger , line ) line = re_new_arxiv_5digits . sub ( tagger , line ) line = re_new_arxiv . sub ( tagger , line ) return line | Tag arxiv report numbers |
47,704 | def tag_arxiv_more ( line ) : line = RE_ARXIV_CATCHUP . sub ( ur"\g<suffix>/\g<year>\g<month>\g<num>" , line ) for report_re , report_repl in RE_OLD_ARXIV : report_number = report_repl + ur"/\g<num>" line = report_re . sub ( u'<cds.REPORTNUMBER>' + report_number + u'</cds.REPORTNUMBER>' , line ) return line | Tag old arxiv report numbers |
47,705 | def tag_pos_volume ( line ) : def tagger ( match ) : groups = match . groupdict ( ) try : year = match . group ( 'year' ) except IndexError : g = re . search ( re_pos_year_num , match . group ( 'volume_num' ) , re . UNICODE ) year = g . group ( 0 ) if year : groups [ 'year' ] = ' <cds.YR>(%s)</cds.YR>' % year . strip ( ) . strip ( '()' ) else : groups [ 'year' ] = '' return '<cds.JOURNAL>PoS</cds.JOURNAL>' ' <cds.VOL>%(volume_name)s%(volume_num)s</cds.VOL>' '%(year)s' ' <cds.PG>%(page)s</cds.PG>' % groups for p in re_pos : line = p . sub ( tagger , line ) return line | Tag POS volume number |
47,706 | def find_numeration_more ( line ) : patterns = ( re_correct_numeration_2nd_try_ptn1 , re_correct_numeration_2nd_try_ptn2 , re_correct_numeration_2nd_try_ptn3 , re_correct_numeration_2nd_try_ptn4 , ) for pattern in patterns : match = pattern . search ( line ) if match : info = match . groupdict ( ) series = extract_series_from_volume ( info [ 'vol' ] ) if not info [ 'vol_num' ] : info [ 'vol_num' ] = info [ 'vol_num_alt' ] if not info [ 'vol_num' ] : info [ 'vol_num' ] = info [ 'vol_num_alt2' ] return { 'year' : info . get ( 'year' , None ) , 'series' : series , 'volume' : info [ 'vol_num' ] , 'page' : info [ 'page' ] or info [ 'jinst_page' ] , 'page_end' : info [ 'page_end' ] , 'len' : len ( info [ 'aftertitle' ] ) } return None | Look for other numeration in line . |
47,707 | def identify_ibids ( line ) : ibid_match_txt = { } for m_ibid in re_ibid . finditer ( line ) : ibid_match_txt [ m_ibid . start ( ) ] = m_ibid . group ( 0 ) line = line [ 0 : m_ibid . start ( ) ] + "_" * len ( m_ibid . group ( 0 ) ) + line [ m_ibid . end ( ) : ] return ibid_match_txt , line | Find IBIDs within the line record their position and length and replace them with underscores . |
47,708 | def find_numeration ( line ) : patterns = ( re_numeration_vol_page_yr , re_numeration_vol_nucphys_page_yr , re_numeration_nucphys_vol_page_yr , re_numeration_vol_subvol_nucphys_yr_page , re_numeration_vol_nucphys_yr_subvol_page , re_numeration_vol_yr_page , re_numeration_nucphys_vol_yr_page , re_numeration_vol_nucphys_series_yr_page , re_numeration_vol_series_nucphys_page_yr , re_numeration_vol_nucphys_series_page_yr , re_numeration_yr_vol_page , ) for pattern in patterns : match = pattern . match ( line ) if match : info = match . groupdict ( ) series = info . get ( 'series' , None ) if not series : series = extract_series_from_volume ( info [ 'vol' ] ) if not info [ 'vol_num' ] : info [ 'vol_num' ] = info [ 'vol_num_alt' ] if not info [ 'vol_num' ] : info [ 'vol_num' ] = info [ 'vol_num_alt2' ] return { 'year' : info . get ( 'year' , None ) , 'series' : series , 'volume' : info [ 'vol_num' ] , 'page' : info [ 'page' ] or info [ 'jinst_page' ] , 'page_end' : info [ 'page_end' ] , 'len' : match . end ( ) } return None | Given a reference line attempt to locate instances of citation numeration in the line . |
47,709 | def remove_reference_line_marker ( line ) : marker_patterns = get_reference_line_numeration_marker_patterns ( ) line = line . lstrip ( ) marker_match = regex_match_list ( line , marker_patterns ) if marker_match is not None : marker_val = marker_match . group ( u'mark' ) line = line [ marker_match . end ( ) : ] . lstrip ( ) else : marker_val = u" " return ( marker_val , line ) | Trim a reference line s marker from the beginning of the line . |
47,710 | def roman2arabic ( num ) : t = 0 p = 0 for r in num : n = 10 ** ( 205558 % ord ( r ) % 7 ) % 9995 t += n - 2 * p % n p = n return t | Convert numbers from roman to arabic |
47,711 | def format_report_number ( citation_elements ) : re_report = re . compile ( ur'^(?P<name>[A-Z-]+)(?P<nums>[\d-]+)$' , re . UNICODE ) for el in citation_elements : if el [ 'type' ] == 'REPORTNUMBER' : m = re_report . match ( el [ 'report_num' ] ) if m : name = m . group ( 'name' ) if not name . endswith ( '-' ) : el [ 'report_num' ] = m . group ( 'name' ) + '-' + m . group ( 'nums' ) return citation_elements | Format report numbers that are missing a dash |
47,712 | def format_hep ( citation_elements ) : prefixes = ( 'astro-ph-' , 'hep-th-' , 'hep-ph-' , 'hep-ex-' , 'hep-lat-' , 'math-ph-' ) for el in citation_elements : if el [ 'type' ] == 'REPORTNUMBER' : for p in prefixes : if el [ 'report_num' ] . startswith ( p ) : el [ 'report_num' ] = el [ 'report_num' ] [ : len ( p ) - 1 ] + '/' + el [ 'report_num' ] [ len ( p ) : ] return citation_elements | Format hep - th report numbers with a dash |
47,713 | def look_for_books ( citation_elements , kbs ) : title = None for el in citation_elements : if el [ 'type' ] == 'QUOTED' : title = el break if title : normalized_title = title [ 'title' ] . upper ( ) if normalized_title in kbs [ 'books' ] : line = kbs [ 'books' ] [ normalized_title ] el = { 'type' : 'BOOK' , 'misc_txt' : '' , 'authors' : line [ 0 ] , 'title' : line [ 1 ] , 'year' : line [ 2 ] . strip ( ';' ) } citation_elements . append ( el ) citation_elements . remove ( title ) return citation_elements | Look for books in our kb |
47,714 | def split_volume_from_journal ( citation_elements ) : for el in citation_elements : if el [ 'type' ] == 'JOURNAL' and ';' in el [ 'title' ] : el [ 'title' ] , series = el [ 'title' ] . rsplit ( ';' , 1 ) el [ 'volume' ] = series + el [ 'volume' ] return citation_elements | Split volume from journal title |
47,715 | def remove_b_for_nucl_phys ( citation_elements ) : for el in citation_elements : if el [ 'type' ] == 'JOURNAL' and el [ 'title' ] == 'Nucl.Phys.Proc.Suppl.' and 'volume' in el and ( el [ 'volume' ] . startswith ( 'b' ) or el [ 'volume' ] . startswith ( 'B' ) ) : el [ 'volume' ] = el [ 'volume' ] [ 1 : ] return citation_elements | Removes b from the volume of some journals |
47,716 | def mangle_volume ( citation_elements ) : volume_re = re . compile ( ur"(\d+)([A-Z])" , re . U | re . I ) for el in citation_elements : if el [ 'type' ] == 'JOURNAL' : matches = volume_re . match ( el [ 'volume' ] ) if matches : el [ 'volume' ] = matches . group ( 2 ) + matches . group ( 1 ) return citation_elements | Make sure the volume letter is before the volume number |
47,717 | def split_citations ( citation_elements ) : splitted_citations = [ ] new_elements = [ ] current_recid = None current_doi = None def check_ibid ( current_elements , trigger_el ) : for el in new_elements : if el [ 'type' ] == 'AUTH' : return if trigger_el . get ( 'is_ibid' , False ) : if splitted_citations : els = chain ( reversed ( current_elements ) , reversed ( splitted_citations [ - 1 ] ) ) else : els = reversed ( current_elements ) for el in els : if el [ 'type' ] == 'AUTH' : new_elements . append ( el . copy ( ) ) break def start_new_citation ( ) : splitted_citations . append ( new_elements [ : ] ) del new_elements [ : ] for el in citation_elements : try : el_recid = el [ 'recid' ] except KeyError : el_recid = None if current_recid and el_recid and current_recid == el_recid : pass elif current_recid and el_recid and current_recid != el_recid or current_doi and el [ 'type' ] == 'DOI' and current_doi != el [ 'doi_string' ] : start_new_citation ( ) balance_authors ( splitted_citations , new_elements ) elif ';' in el [ 'misc_txt' ] : misc_txt , el [ 'misc_txt' ] = el [ 'misc_txt' ] . split ( ';' , 1 ) if misc_txt : new_elements . append ( { 'type' : 'MISC' , 'misc_txt' : misc_txt } ) start_new_citation ( ) current_recid = el_recid while ';' in el [ 'misc_txt' ] : misc_txt , el [ 'misc_txt' ] = el [ 'misc_txt' ] . split ( ';' , 1 ) if misc_txt : new_elements . append ( { 'type' : 'MISC' , 'misc_txt' : misc_txt } ) start_new_citation ( ) current_recid = None if el_recid : current_recid = el_recid if el [ 'type' ] == 'DOI' : current_doi = el [ 'doi_string' ] check_ibid ( new_elements , el ) new_elements . append ( el ) splitted_citations . append ( new_elements ) return [ el for el in splitted_citations if not empty_citation ( el ) ] | Split a citation line in multiple citations |
47,718 | def look_for_hdl ( citation_elements ) : for el in list ( citation_elements ) : matched_hdl = re_hdl . finditer ( el [ 'misc_txt' ] ) for match in reversed ( list ( matched_hdl ) ) : hdl_el = { 'type' : 'HDL' , 'hdl_id' : match . group ( 'hdl_id' ) , 'misc_txt' : el [ 'misc_txt' ] [ match . end ( ) : ] } el [ 'misc_txt' ] = el [ 'misc_txt' ] [ 0 : match . start ( ) ] citation_elements . insert ( citation_elements . index ( el ) + 1 , hdl_el ) | Looks for handle identifiers in the misc txt of the citation elements |
47,719 | def look_for_hdl_urls ( citation_elements ) : for el in citation_elements : if el [ 'type' ] == 'URL' : match = re_hdl . match ( el [ 'url_string' ] ) if match : el [ 'type' ] = 'HDL' el [ 'hdl_id' ] = match . group ( 'hdl_id' ) del el [ 'url_desc' ] del el [ 'url_string' ] | Looks for handle identifiers that have already been identified as urls |
47,720 | def parse_reference_line ( ref_line , kbs , bad_titles_count = { } , linker_callback = None ) : line_marker , ref_line = remove_reference_line_marker ( ref_line ) ref_line , identified_dois = identify_and_tag_DOI ( ref_line ) ref_line , identified_urls = identify_and_tag_URLs ( ref_line ) tagged_line , bad_titles_count = tag_reference_line ( ref_line , kbs , bad_titles_count ) LOGGER . debug ( "tags %r" , tagged_line ) citation_elements , line_marker , counts = parse_tagged_reference_line ( line_marker , tagged_line , identified_dois , identified_urls ) split_volume_from_journal ( citation_elements ) format_volume ( citation_elements ) handle_special_journals ( citation_elements , kbs ) format_report_number ( citation_elements ) format_author_ed ( citation_elements ) look_for_books ( citation_elements , kbs ) format_hep ( citation_elements ) remove_b_for_nucl_phys ( citation_elements ) mangle_volume ( citation_elements ) arxiv_urls_to_report_numbers ( citation_elements ) look_for_hdl ( citation_elements ) look_for_hdl_urls ( citation_elements ) if linker_callback : associate_recids ( citation_elements , linker_callback ) splitted_citations = split_citations ( citation_elements ) look_for_implied_ibids ( splitted_citations ) add_year_elements ( splitted_citations ) look_for_undetected_books ( splitted_citations , kbs ) if linker_callback : for citations in splitted_citations : associate_recids ( citations , linker_callback ) remove_duplicated_authors ( splitted_citations ) remove_duplicated_dois ( splitted_citations ) remove_duplicated_collaborations ( splitted_citations ) add_recid_elements ( splitted_citations ) print_citations ( splitted_citations , line_marker ) return splitted_citations , line_marker , counts , bad_titles_count | Parse one reference line |
47,721 | def search_for_book_in_misc ( citation , kbs ) : citation_year = year_from_citation ( citation ) for citation_element in citation : LOGGER . debug ( u"Searching for book title in: %s" , citation_element [ 'misc_txt' ] ) for title in kbs [ 'books' ] : startIndex = find_substring_ignore_special_chars ( citation_element [ 'misc_txt' ] , title ) if startIndex != - 1 : line = kbs [ 'books' ] [ title . upper ( ) ] book_year = line [ 2 ] . strip ( ';' ) book_authors = line [ 0 ] book_found = False if citation_year == book_year : book_found = True for author in get_possible_author_names ( citation ) : if find_substring_ignore_special_chars ( book_authors , author ) != - 1 : book_found = True for author in re . findall ( '[a-zA-Z]{4,}' , book_authors ) : if find_substring_ignore_special_chars ( citation_element [ 'misc_txt' ] , author ) != - 1 : book_found = True if book_found : LOGGER . debug ( u"Book found: %s" , title ) book_element = { 'type' : 'BOOK' , 'misc_txt' : '' , 'authors' : book_authors , 'title' : line [ 1 ] , 'year' : book_year } citation . append ( book_element ) citation_element [ 'misc_txt' ] = cut_substring_with_special_chars ( citation_element [ 'misc_txt' ] , title , startIndex ) citation_element [ 'misc_txt' ] = remove_year ( citation_element [ 'misc_txt' ] , book_year ) return True LOGGER . debug ( "Book not found!" ) return False | Searches for books in the misc_txt field if the citation is not recognized as anything like a journal book etc . |
47,722 | def map_tag_to_subfield ( tag_type , line , cur_misc_txt , dest ) : closing_tag = '</cds.%s>' % tag_type idx_closing_tag = line . find ( closing_tag ) if idx_closing_tag == - 1 : identified_citation_element = None line = line [ len ( '<cds.%s>' % tag_type ) : ] else : tag_content = line [ : idx_closing_tag ] identified_citation_element = { 'type' : tag_type , 'misc_txt' : cur_misc_txt , dest : tag_content } ending_tag_pos = idx_closing_tag + len ( closing_tag ) line = line [ ending_tag_pos : ] cur_misc_txt = u"" return identified_citation_element , line , cur_misc_txt | Create a new reference element |
47,723 | def remove_leading_garbage_lines_from_reference_section ( ref_sectn ) : p_email = re . compile ( ur'^\s*e\-?mail' , re . UNICODE ) while ref_sectn and ( ref_sectn [ 0 ] . isspace ( ) or p_email . match ( ref_sectn [ 0 ] ) ) : ref_sectn . pop ( 0 ) return ref_sectn | Sometimes the first lines of the extracted references are completely blank or email addresses . These must be removed as they are not references . |
47,724 | def get_plaintext_document_body ( fpath , keep_layout = False ) : textbody = [ ] mime_type = magic . from_file ( fpath , mime = True ) if mime_type == "text/plain" : with open ( fpath , "r" ) as f : textbody = [ line . decode ( "utf-8" ) for line in f . readlines ( ) ] elif mime_type == "application/pdf" : textbody = convert_PDF_to_plaintext ( fpath , keep_layout ) else : raise UnknownDocumentTypeError ( mime_type ) return textbody | Given a file - path to a full - text return a list of unicode strings whereby each string is a line of the fulltext . In the case of a plain - text document this simply means reading the contents in from the file . In the case of a PDF however this means converting the document to plaintext . It raises UnknownDocumentTypeError if the document is not a PDF or plain text . |
47,725 | def parse_references ( reference_lines , recid = None , override_kbs_files = None , reference_format = u"{title} {volume} ({year}) {page}" , linker_callback = None ) : kbs = get_kbs ( custom_kbs_files = override_kbs_files ) processed_references , counts , dummy_bad_titles_count = parse_references_elements ( reference_lines , kbs , linker_callback ) return ( build_references ( processed_references , reference_format ) , build_stats ( counts ) ) | Parse a list of references |
47,726 | def build_stats ( counts ) : stats = { 'status' : 0 , 'reportnum' : counts [ 'reportnum' ] , 'title' : counts [ 'title' ] , 'author' : counts [ 'auth_group' ] , 'url' : counts [ 'url' ] , 'doi' : counts [ 'doi' ] , 'misc' : counts [ 'misc' ] , } stats_str = "%(status)s-%(reportnum)s-%(title)s-%(author)s-%(url)s-%(doi)s-%(misc)s" % stats stats [ "old_stats_str" ] = stats_str stats [ "date" ] = datetime . now ( ) . strftime ( "%Y-%m-%d %H:%M:%S" ) stats [ "version" ] = version return stats | Return stats information from counts structure . |
47,727 | def replace_undesirable_characters ( line ) : for bad_string , replacement in UNDESIRABLE_STRING_REPLACEMENTS : line = line . replace ( bad_string , replacement ) for bad_char , replacement in iteritems ( UNDESIRABLE_CHAR_REPLACEMENTS ) : line = line . replace ( bad_char , replacement ) return line | Replace certain bad characters in a text line . |
47,728 | def convert_PDF_to_plaintext ( fpath , keep_layout = False ) : if not os . path . isfile ( CFG_PATH_PDFTOTEXT ) : raise IOError ( 'Missing pdftotext executable' ) if keep_layout : layout_option = "-layout" else : layout_option = "-raw" doclines = [ ] p_break_in_line = re . compile ( ur'^\s*\f(.+)$' , re . UNICODE ) cmd_pdftotext = [ CFG_PATH_PDFTOTEXT , layout_option , "-q" , "-enc" , "UTF-8" , fpath , "-" ] LOGGER . debug ( u"%s" , ' ' . join ( cmd_pdftotext ) ) pipe_pdftotext = subprocess . Popen ( cmd_pdftotext , stdout = subprocess . PIPE ) for docline in pipe_pdftotext . stdout : unicodeline = docline . decode ( "utf-8" ) m_break_in_line = p_break_in_line . match ( unicodeline ) if m_break_in_line is None : doclines . append ( unicodeline ) else : doclines . append ( u"\f" ) doclines . append ( m_break_in_line . group ( 1 ) ) LOGGER . debug ( u"convert_PDF_to_plaintext found: %s lines of text" , len ( doclines ) ) return doclines | Convert PDF to txt using pdftotext |
47,729 | def get_author_affiliation_numeration_str ( punct = None ) : re_number = r'(?:\d\d?)' re_chained_numbers = r"(?:(?:[,;]\s*%s\.?\s*))*" % re_number if punct is None : re_punct = r"(?:[\{\(\[]?)" else : re_punct = re . escape ( punct ) numeration_str = r % { 'num' : re_number , 'num_chain' : re_chained_numbers , 'punct' : re_punct } return numeration_str | The numeration which can be applied to author names . Numeration is sometimes found next to authors of papers . |
47,730 | def make_auth_regex_str ( etal , initial_surname_author = None , surname_initial_author = None ) : if not initial_surname_author : initial_surname_author = get_initial_surname_author_pattern ( ) if not surname_initial_author : surname_initial_author = get_surname_initial_author_pattern ( ) return ur % { 'etal' : etal , 'i_s_author' : initial_surname_author , 's_i_author' : surname_initial_author , 'ed' : re_ed_notation } | Returns a regular expression to be used to identify groups of author names in a citation . This method contains patterns for default authors so no arguments are needed for the most reliable form of matching . |
47,731 | def find_reference_section ( docbody ) : ref_details = None title_patterns = get_reference_section_title_patterns ( ) for title_pattern in title_patterns : for reversed_index , line in enumerate ( reversed ( docbody ) ) : title_match = title_pattern . match ( line ) if title_match : title = title_match . group ( 'title' ) index = len ( docbody ) - 1 - reversed_index temp_ref_details , found_title = find_numeration ( docbody [ index : index + 6 ] , title ) if temp_ref_details : if ref_details and 'title' in ref_details and ref_details [ 'title' ] and not temp_ref_details [ 'title' ] : continue if ref_details and 'marker' in ref_details and ref_details [ 'marker' ] and not temp_ref_details [ 'marker' ] : continue ref_details = temp_ref_details ref_details [ 'start_line' ] = index ref_details [ 'title_string' ] = title if found_title : break if ref_details : break return ref_details | Search in document body for its reference section . |
47,732 | def find_numeration ( docbody , title ) : ref_details , found_title = find_numeration_in_title ( docbody , title ) if not ref_details : ref_details , found_title = find_numeration_in_body ( docbody ) return ref_details , found_title | Find numeration pattern |
47,733 | def get_reference_lines ( docbody , ref_sect_start_line , ref_sect_end_line , ref_sect_title , ref_line_marker_ptn , title_marker_same_line ) : start_idx = ref_sect_start_line if title_marker_same_line : title_start = docbody [ start_idx ] . find ( ref_sect_title ) if title_start != - 1 : docbody [ start_idx ] = docbody [ start_idx ] [ title_start + len ( ref_sect_title ) : ] elif ref_sect_title is not None : start_idx += 1 if ref_sect_end_line is not None : ref_lines = docbody [ start_idx : ref_sect_end_line + 1 ] else : ref_lines = docbody [ start_idx : ] if ref_sect_title : ref_lines = strip_footer ( ref_lines , ref_sect_title ) return rebuild_reference_lines ( ref_lines , ref_line_marker_ptn ) | After the reference section of a document has been identified and the first and last lines of the reference section have been recorded this function is called to take the reference lines out of the document body . The document s reference lines are returned in a list of strings whereby each string is a reference line . Before this can be done however the reference section is passed to another function that rebuilds any broken reference lines . |
47,734 | def match_pagination ( ref_line ) : pattern = ur'\(?\[?(\d{1,4})\]?\)?\.?\s*$' re_footer = re . compile ( pattern , re . UNICODE ) match = re_footer . match ( ref_line ) if match : return int ( match . group ( 1 ) ) return None | Remove footer pagination from references lines |
47,735 | def strip_footer ( ref_lines , section_title ) : pattern = ur'\(?\[?\d{0,4}\]?\)?\.?\s*%s\s*$' % re . escape ( section_title ) re_footer = re . compile ( pattern , re . UNICODE ) return [ l for l in ref_lines if not re_footer . match ( l ) ] | Remove footer title from references lines |
47,736 | def extract_references_from_url ( url , headers = None , chunk_size = 1024 , ** kwargs ) : filename , filepath = mkstemp ( suffix = u"_{0}" . format ( os . path . basename ( url ) ) , ) os . close ( filename ) try : req = requests . get ( url = url , headers = headers , stream = True ) req . raise_for_status ( ) with open ( filepath , 'wb' ) as f : for chunk in req . iter_content ( chunk_size ) : f . write ( chunk ) references = extract_references_from_file ( filepath , ** kwargs ) except requests . exceptions . HTTPError : raise FullTextNotAvailableError ( u"URL not found: '{0}'" . format ( url ) ) , None , sys . exc_info ( ) [ 2 ] finally : os . remove ( filepath ) return references | Extract references from the pdf specified in the url . |
47,737 | def extract_references_from_file ( path , recid = None , reference_format = u"{title} {volume} ({year}) {page}" , linker_callback = None , override_kbs_files = None ) : if not os . path . isfile ( path ) : raise FullTextNotAvailableError ( u"File not found: '{0}'" . format ( path ) ) docbody = get_plaintext_document_body ( path ) reflines , dummy , dummy = extract_references_from_fulltext ( docbody ) if not reflines : docbody = get_plaintext_document_body ( path , keep_layout = True ) reflines , dummy , dummy = extract_references_from_fulltext ( docbody ) parsed_refs , stats = parse_references ( reflines , recid = recid , reference_format = reference_format , linker_callback = linker_callback , override_kbs_files = override_kbs_files , ) if magic . from_file ( path , mime = True ) == "application/pdf" : texkeys = extract_texkeys_from_pdf ( path ) if len ( texkeys ) == len ( parsed_refs ) : parsed_refs = [ dict ( ref , texkey = [ key ] ) for ref , key in izip ( parsed_refs , texkeys ) ] return parsed_refs | Extract references from a local pdf file . |
47,738 | def extract_references_from_string ( source , is_only_references = True , recid = None , reference_format = "{title} {volume} ({year}) {page}" , linker_callback = None , override_kbs_files = None ) : docbody = source . split ( '\n' ) if not is_only_references : reflines , dummy , dummy = extract_references_from_fulltext ( docbody ) else : refs_info = get_reference_section_beginning ( docbody ) if not refs_info : refs_info , dummy = find_numeration_in_body ( docbody ) refs_info [ 'start_line' ] = 0 refs_info [ 'end_line' ] = len ( docbody ) - 1 , reflines = rebuild_reference_lines ( docbody , refs_info [ 'marker_pattern' ] ) parsed_refs , stats = parse_references ( reflines , recid = recid , reference_format = reference_format , linker_callback = linker_callback , override_kbs_files = override_kbs_files , ) return parsed_refs | Extract references from a raw string . |
47,739 | def extract_journal_reference ( line , override_kbs_files = None ) : kbs = get_kbs ( custom_kbs_files = override_kbs_files ) references , dummy_m , dummy_c , dummy_co = parse_reference_line ( line , kbs ) for elements in references : for el in elements : if el [ 'type' ] == 'JOURNAL' : return el | Extract the journal reference from string . |
47,740 | def build_references ( citations , reference_format = False ) : return [ c for citation_elements in citations for elements in citation_elements [ 'elements' ] for c in build_reference_fields ( elements , citation_elements [ 'line_marker' ] , citation_elements [ 'raw_ref' ] , reference_format ) ] | Build list of reference dictionaries from a references list |
47,741 | def build_reference_fields ( citation_elements , line_marker , raw_ref , reference_format ) : current_field = create_reference_field ( line_marker ) current_field [ 'raw_ref' ] = [ raw_ref ] reference_fields = [ current_field ] for element in citation_elements : misc_txt = element [ 'misc_txt' ] if misc_txt . strip ( "., [](){}" ) : misc_txt = misc_txt . lstrip ( '])} ,.' ) . rstrip ( '[({ ,.' ) add_subfield ( current_field , 'misc' , misc_txt ) if element [ 'type' ] == "JOURNAL" : add_journal_subfield ( current_field , element , reference_format ) elif element [ 'type' ] == "REPORTNUMBER" : add_subfield ( current_field , 'reportnumber' , element [ 'report_num' ] ) elif element [ 'type' ] == "URL" : if element [ 'url_string' ] == element [ 'url_desc' ] : add_subfield ( current_field , 'url' , element [ 'url_string' ] ) else : add_subfield ( current_field , 'url' , element [ 'url_string' ] ) add_subfield ( current_field , 'urldesc' , element [ 'url_desc' ] ) elif element [ 'type' ] == "DOI" : add_subfield ( current_field , 'doi' , 'doi:' + element [ 'doi_string' ] ) elif element [ 'type' ] == "HDL" : add_subfield ( current_field , 'hdl' , 'hdl:' + element [ 'hdl_id' ] ) elif element [ 'type' ] == "AUTH" : value = element [ 'auth_txt' ] if element [ 'auth_type' ] == 'incl' : value = "(%s)" % value add_subfield ( current_field , 'author' , value ) elif element [ 'type' ] == "QUOTED" : add_subfield ( current_field , 'title' , element [ 'title' ] ) elif element [ 'type' ] == "ISBN" : add_subfield ( current_field , 'isbn' , element [ 'ISBN' ] ) elif element [ 'type' ] == "BOOK" : add_subfield ( current_field , 'title' , element [ 'title' ] ) elif element [ 'type' ] == "PUBLISHER" : add_subfield ( current_field , 'publisher' , element [ 'publisher' ] ) elif element [ 'type' ] == "YEAR" : add_subfield ( current_field , 'year' , element [ 'year' ] ) elif element [ 'type' ] == "COLLABORATION" : add_subfield ( current_field , 'collaboration' , element [ 
'collaboration' ] ) elif element [ 'type' ] == "RECID" : add_subfield ( current_field , 'recid' , str ( element [ 'recid' ] ) ) return reference_fields | Create the final representation of the reference information . |
47,742 | def extract_texkeys_from_pdf ( pdf_file ) : with open ( pdf_file , 'rb' ) as pdf_stream : try : pdf = PdfFileReader ( pdf_stream , strict = False ) destinations = pdf . getNamedDestinations ( ) except Exception : LOGGER . debug ( u"PDF: Internal PyPDF2 error, no TeXkeys returned." ) return [ ] refs = [ dest for dest in destinations . iteritems ( ) if re_reference_in_dest . match ( dest [ 0 ] ) ] try : if _destinations_in_two_columns ( pdf , refs ) : LOGGER . debug ( u"PDF: Using two-column layout" ) def sortfunc ( dest_couple ) : return _destination_position ( pdf , dest_couple [ 1 ] ) else : LOGGER . debug ( u"PDF: Using single-column layout" ) def sortfunc ( dest_couple ) : ( page , _ , ypos , xpos ) = _destination_position ( pdf , dest_couple [ 1 ] ) return ( page , ypos , xpos ) refs . sort ( key = sortfunc ) return [ re_reference_in_dest . match ( destname ) . group ( 1 ) for ( destname , _ ) in refs ] except Exception : LOGGER . debug ( u"PDF: Impossible to determine layout, no TeXkeys returned" ) return [ ] | Extract the texkeys from the given PDF file |
47,743 | def get_reference_line_numeration_marker_patterns ( prefix = u'' ) : title = u"" if type ( prefix ) in ( str , unicode ) : title = prefix g_name = u'(?P<mark>' g_close = u')' space = ur'\s*' patterns = [ space + title + g_name + ur'\[\s*(?P<marknum>\d+)\s*\]' + g_close , space + title + g_name + ur'\[\s*[a-zA-Z:-]+\+?\s?(\d{1,4}[A-Za-z:-]?)?\s*\]' + g_close , space + title + g_name + ur'\{\s*(?P<marknum>\d+)\s*\}' + g_close , space + title + g_name + ur'\<\s*(?P<marknum>\d+)\s*\>' + g_close , space + title + g_name + ur'\(\s*(?P<marknum>\d+)\s*\)' + g_close , space + title + g_name + ur'(?P<marknum>\d+)\s*\.(?!\d)' + g_close , space + title + g_name + ur'(?P<marknum>\d+)\s+' + g_close , space + title + g_name + ur'(?P<marknum>\d+)\s*\]' + g_close , space + title + g_name + ur'(?P<marknum>\d+)\s*\}' + g_close , space + title + g_name + ur'(?P<marknum>\d+)\s*\)' + g_close , space + title + g_name + ur'(?P<marknum>\d+)\s*\>' + g_close , space + title + g_name + ur'\[\s*\d+\.\d+\s*\]' + g_close , space + title + g_name + ur'\[\s*\]' + g_close , space + title + g_name + ur'\*' + g_close , ] return [ re . compile ( p , re . I | re . UNICODE ) for p in patterns ] | Return a list of compiled regex patterns used to search for the marker of a reference line in a full - text document . |
47,744 | def get_post_reference_section_title_patterns ( ) : compiled_patterns = [ ] thead = ur'^\s*([\{\(\<\[]?\s*(\w|\d)\s*[\)\}\>\.\-\]]?\s*)?' ttail = ur'(\s*\:\s*)?' numatn = ur'(\d+|\w\b|i{1,3}v?|vi{0,3})[\.\,]{0,2}\b' roman_numbers = ur'[LVIX]' patterns = [ thead + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'appendix' ) + ttail , thead + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'appendices' ) + ttail , thead + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'acknowledgement' ) + ur's?' + ttail , thead + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'acknowledgment' ) + ur's?' + ttail , thead + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'table' ) + ur'\w?s?\d?' + ttail , thead + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'figure' ) + ur's?' + ttail , thead + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'list of figure' ) + ur's?' + ttail , thead + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'annex' ) + ur's?' + ttail , thead + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'discussion' ) + ur's?' + ttail , thead + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'remercie' ) + ur's?' + ttail , thead + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'index' ) + ur's?' + ttail , thead + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'summary' ) + ur's?' 
+ ttail , ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'figure' ) + numatn , ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'fig' ) + ur'\.\s*' + numatn , ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'fig' ) + ur'\.?\s*\d\w?\b' , ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'table' ) + numatn , ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'tab' ) + ur'\.\s*' + numatn , ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'tab' ) + ur'\.?\s*\d\w?\b' , ur'^\s*' + roman_numbers + ur'\.?\s*[Cc]onclusion[\w\s]*$' , ur'^\s*Appendix\s[A-Z]\s*\:\s*[a-zA-Z]+\s*' , ] for p in patterns : compiled_patterns . append ( re . compile ( p , re . I | re . UNICODE ) ) return compiled_patterns | Return a list of compiled regex patterns used to search for the title of the section after the reference section in a full - text document . |
47,745 | def get_post_reference_section_keyword_patterns ( ) : compiled_patterns = [ ] patterns = [ u'(' + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'prepared' ) + ur'|' + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'created' ) + ur').*(AAS\s*)?\sLATEX' , ur'AAS\s+?LATEX\s+?' + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'macros' ) + u'v' , ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'This paper has been produced using' ) , ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'This article was processed by the author using Springer-Verlag' ) + u' LATEX' ] for p in patterns : compiled_patterns . append ( re . compile ( p , re . I | re . UNICODE ) ) return compiled_patterns | Return a list of compiled regex patterns used to search for various keywords that can often be found after and therefore suggest the end of a reference section in a full - text document . |
47,746 | def regex_match_list ( line , patterns ) : m = None for ptn in patterns : m = ptn . match ( line ) if m is not None : break return m | Given a list of COMPILED regex patters perform the re . match operation on the line for every pattern . Break from searching at the first match returning the match object . In the case that no patterns match the None type will be returned . |
47,747 | def get_url_repair_patterns ( ) : file_types_list = [ ur'h\s*t\s*m' , ur'h\s*t\s*m\s*l' , ur't\s*x\s*t' ur'p\s*h\s*p' ur'a\s*s\s*p\s*' ur'j\s*s\s*p' , ur'p\s*y' , ur'p\s*l' , ur'x\s*m\s*l' , ur'j\s*p\s*g' , ur'g\s*i\s*f' ur'm\s*o\s*v' ur's\s*w\s*f' ur'p\s*d\s*f' ur'p\s*s' ur'd\s*o\s*c' , ur't\s*e\s*x' , ur's\s*h\s*t\s*m\s*l' , ] pattern_list = [ ur'(h\s*t\s*t\s*p\s*\:\s*\/\s*\/)' , ur'(f\s*t\s*p\s*\:\s*\/\s*\/\s*)' , ur'((http|ftp):\/\/\s*[\w\d])' , ur'((http|ftp):\/\/([\w\d\s\._\-])+?\s*\/)' , ur'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\s\.\-])+?\/)+)' , ur'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\s\.\-])+?\/)*([\w\d\_\s\-]+\.\s?[\w\d]+))' , ] pattern_list = [ re . compile ( p , re . I | re . UNICODE ) for p in pattern_list ] p = ur'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\.\-])+?\/)*([\w\d\_\-]+\.%s))' for extension in file_types_list : p_url = re . compile ( p % extension , re . I | re . UNICODE ) pattern_list . append ( p_url ) p_url = re . compile ( r'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\.\-])+?\/)*\s*?([\w\d\_\.\-]\s?){1,10}\s*)$' , re . I | re . UNICODE ) pattern_list . append ( p_url ) return pattern_list | Initialise and return a list of precompiled regexp patterns that are used to try to re - assemble URLs that have been broken during a document s conversion to plain - text . |
47,748 | def join_lines ( line1 , line2 ) : if line1 == u"" : pass elif line1 [ - 1 ] == u'-' : line1 = line1 [ : - 1 ] elif line1 [ - 1 ] != u' ' : line1 = line1 + u' ' return line1 + line2 | Join 2 lines of text |
47,749 | def repair_broken_urls ( line ) : def _chop_spaces_in_url_match ( m ) : return m . group ( 1 ) . replace ( " " , "" ) for ptn in re_list_url_repair_patterns : line = ptn . sub ( _chop_spaces_in_url_match , line ) return line | Attempt to repair broken URLs in a line of text . |
47,750 | def remove_and_record_multiple_spaces_in_line ( line ) : removed_spaces = { } multispace_matches = re_group_captured_multiple_space . finditer ( line ) for multispace in multispace_matches : removed_spaces [ multispace . start ( ) ] = ( multispace . end ( ) - multispace . start ( ) - 1 ) line = re_group_captured_multiple_space . sub ( u' ' , line ) return ( removed_spaces , line ) | For a given string locate all ocurrences of multiple spaces together in the line record the number of spaces found at each position and replace them with a single space . |
47,751 | def remove_page_boundary_lines ( docbody ) : number_head_lines = number_foot_lines = 0 if not document_contains_text ( docbody ) : return docbody page_break_posns = get_page_break_positions ( docbody ) number_head_lines = get_number_header_lines ( docbody , page_break_posns ) number_foot_lines = get_number_footer_lines ( docbody , page_break_posns ) docbody = strip_headers_footers_pagebreaks ( docbody , page_break_posns , number_head_lines , number_foot_lines ) return docbody | Try to locate page breaks headers and footers within a document body and remove the array cells at which they are found . |
47,752 | def get_page_break_positions ( docbody ) : page_break_posns = [ ] p_break = re . compile ( ur'^\s*\f\s*$' , re . UNICODE ) num_document_lines = len ( docbody ) for i in xrange ( num_document_lines ) : if p_break . match ( docbody [ i ] ) is not None : page_break_posns . append ( i ) return page_break_posns | Locate page breaks in the list of document lines and create a list positions in the document body list . |
47,753 | def get_number_header_lines ( docbody , page_break_posns ) : remaining_breaks = len ( page_break_posns ) - 1 num_header_lines = empty_line = 0 p_wordSearch = re . compile ( ur'([A-Za-z0-9-]+)' , re . UNICODE ) if remaining_breaks > 2 : if remaining_breaks > 3 : next_head = 2 else : next_head = 1 keep_checking = 1 while keep_checking : cur_break = 1 if docbody [ ( page_break_posns [ cur_break ] + num_header_lines + 1 ) ] . isspace ( ) : empty_line = 1 if ( page_break_posns [ cur_break ] + num_header_lines + 1 ) == ( page_break_posns [ ( cur_break + 1 ) ] ) : keep_checking = 0 grps_headLineWords = p_wordSearch . findall ( docbody [ ( page_break_posns [ cur_break ] + num_header_lines + 1 ) ] ) cur_break = cur_break + next_head while ( cur_break < remaining_breaks ) and keep_checking : lineno = page_break_posns [ cur_break ] + num_header_lines + 1 if lineno >= len ( docbody ) : keep_checking = 0 break grps_thisLineWords = p_wordSearch . findall ( docbody [ lineno ] ) if empty_line : if len ( grps_thisLineWords ) != 0 : keep_checking = 0 else : if ( len ( grps_thisLineWords ) == 0 ) or ( len ( grps_headLineWords ) != len ( grps_thisLineWords ) ) : keep_checking = 0 else : keep_checking = check_boundary_lines_similar ( grps_headLineWords , grps_thisLineWords ) cur_break = cur_break + next_head if keep_checking : num_header_lines = num_header_lines + 1 empty_line = 0 return num_header_lines | Try to guess the number of header lines each page of a document has . The positions of the page breaks in the document are used to try to guess the number of header lines . |
47,754 | def get_number_footer_lines ( docbody , page_break_posns ) : num_breaks = len ( page_break_posns ) num_footer_lines = 0 empty_line = 0 keep_checking = 1 p_wordSearch = re . compile ( unicode ( r'([A-Za-z0-9-]+)' ) , re . UNICODE ) if num_breaks > 2 : while keep_checking : cur_break = 1 if page_break_posns [ cur_break ] - num_footer_lines - 1 < 0 or page_break_posns [ cur_break ] - num_footer_lines - 1 > len ( docbody ) - 1 : break if docbody [ ( page_break_posns [ cur_break ] - num_footer_lines - 1 ) ] . isspace ( ) : empty_line = 1 grps_headLineWords = p_wordSearch . findall ( docbody [ ( page_break_posns [ cur_break ] - num_footer_lines - 1 ) ] ) cur_break = cur_break + 1 while ( cur_break < num_breaks ) and keep_checking : grps_thisLineWords = p_wordSearch . findall ( docbody [ ( page_break_posns [ cur_break ] - num_footer_lines - 1 ) ] ) if empty_line : if len ( grps_thisLineWords ) != 0 : keep_checking = 0 else : if ( len ( grps_thisLineWords ) == 0 ) or ( len ( grps_headLineWords ) != len ( grps_thisLineWords ) ) : keep_checking = 0 else : keep_checking = check_boundary_lines_similar ( grps_headLineWords , grps_thisLineWords ) cur_break = cur_break + 1 if keep_checking : num_footer_lines = num_footer_lines + 1 empty_line = 0 return num_footer_lines | Try to guess the number of footer lines each page of a document has . The positions of the page breaks in the document are used to try to guess the number of footer lines . |
47,755 | def strip_headers_footers_pagebreaks ( docbody , page_break_posns , num_head_lines , num_foot_lines ) : num_breaks = len ( page_break_posns ) page_lens = [ ] for x in xrange ( 0 , num_breaks ) : if x < num_breaks - 1 : page_lens . append ( page_break_posns [ x + 1 ] - page_break_posns [ x ] ) page_lens . sort ( ) if ( len ( page_lens ) > 0 ) and ( num_head_lines + num_foot_lines + 1 < page_lens [ 0 ] ) : page_break_posns . reverse ( ) first = 1 for i in xrange ( 0 , len ( page_break_posns ) ) : if not first : for dummy in xrange ( 1 , num_head_lines + 1 ) : docbody [ page_break_posns [ i ] + 1 : page_break_posns [ i ] + 2 ] = [ ] else : first = 0 docbody [ page_break_posns [ i ] : page_break_posns [ i ] + 1 ] = [ ] if i != len ( page_break_posns ) - 1 : for dummy in xrange ( 1 , num_foot_lines + 1 ) : docbody [ page_break_posns [ i ] - num_foot_lines : page_break_posns [ i ] - num_foot_lines + 1 ] = [ ] return docbody | Remove page - break lines header lines and footer lines from the document . |
47,756 | def check_boundary_lines_similar ( l_1 , l_2 ) : num_matches = 0 if ( type ( l_1 ) != list ) or ( type ( l_2 ) != list ) or ( len ( l_1 ) != len ( l_2 ) ) : return 0 num_elements = len ( l_1 ) for i in xrange ( 0 , num_elements ) : if l_1 [ i ] . isdigit ( ) and l_2 [ i ] . isdigit ( ) : num_matches += 1 else : l1_str = l_1 [ i ] . lower ( ) l2_str = l_2 [ i ] . lower ( ) if ( l1_str [ 0 ] == l2_str [ 0 ] ) and ( l1_str [ len ( l1_str ) - 1 ] == l2_str [ len ( l2_str ) - 1 ] ) : num_matches = num_matches + 1 if ( len ( l_1 ) == 0 ) or ( float ( num_matches ) / float ( len ( l_1 ) ) < 0.9 ) : return 0 else : return 1 | Compare two lists to see if their elements are roughly the same . |
47,757 | def make_cache_key ( custom_kbs_files = None ) : if custom_kbs_files : serialized_args = ( '%s=%s' % v for v in iteritems ( custom_kbs_files ) ) serialized_args = ';' . join ( serialized_args ) else : serialized_args = "default" cache_key = md5 ( serialized_args ) . digest ( ) return cache_key | Create cache key for kbs caches instances |
47,758 | def create_institute_numeration_group_regexp_pattern ( patterns ) : patterns_list = [ institute_num_pattern_to_regex ( p [ 1 ] ) for p in patterns ] grouped_numeration_pattern = u"(?P<numn>%s)" % u'|' . join ( patterns_list ) return grouped_numeration_pattern | Using a list of regexp patterns for recognising numeration patterns for institute preprint references ordered by length - longest to shortest - create a grouped OR or of these patterns ready to be used in a bigger regexp . |
47,759 | def build_reportnum_kb ( fpath ) : def _add_institute_preprint_patterns ( preprint_classifications , preprint_numeration_ptns , preprint_reference_search_regexp_patterns , standardised_preprint_reference_categories , kb_line_num ) : if preprint_classifications and preprint_numeration_ptns : ordered_patterns = order_reportnum_patterns_bylen ( preprint_numeration_ptns ) numeration_regexp = create_institute_numeration_group_regexp_pattern ( ordered_patterns ) for classification in preprint_classifications : search_pattern_str = ur'(?:^|[^a-zA-Z0-9\/\.\-])([\[\(]?(?P<categ>' + classification [ 0 ] . strip ( ) + u')' + numeration_regexp + ur'[\]\)]?)' re_search_pattern = re . compile ( search_pattern_str , re . UNICODE ) preprint_reference_search_regexp_patterns [ ( kb_line_num , classification [ 0 ] ) ] = re_search_pattern standardised_preprint_reference_categories [ ( kb_line_num , classification [ 0 ] ) ] = classification [ 1 ] preprint_reference_search_regexp_patterns = { } standardised_preprint_reference_categories = { } current_institute_preprint_classifications = [ ] current_institute_numerations = [ ] re_institute_name = re . compile ( ur'^\*{5}\s*(.+)\s*\*{5}$' , re . UNICODE ) re_preprint_classification = re . compile ( ur'^\s*(\w.*)\s*---\s*(\w.*)\s*$' , re . UNICODE ) re_numeration_pattern = re . compile ( ur'^\<(.+)\>$' , re . UNICODE ) kb_line_num = 0 with file_resolving ( fpath ) as fh : for rawline in fh : if rawline . startswith ( '#' ) : continue kb_line_num += 1 m_institute_name = re_institute_name . search ( rawline ) if m_institute_name : _add_institute_preprint_patterns ( current_institute_preprint_classifications , current_institute_numerations , preprint_reference_search_regexp_patterns , standardised_preprint_reference_categories , kb_line_num ) current_institute_preprint_classifications = [ ] current_institute_numerations = [ ] continue m_preprint_classification = re_preprint_classification . 
search ( rawline ) if m_preprint_classification : try : current_institute_preprint_classifications . append ( ( m_preprint_classification . group ( 1 ) , m_preprint_classification . group ( 2 ) ) ) except ( AttributeError , NameError ) : pass continue m_numeration_pattern = re_numeration_pattern . search ( rawline ) if m_numeration_pattern : try : current_institute_numerations . append ( m_numeration_pattern . group ( 1 ) ) except ( AttributeError , NameError ) : pass continue _add_institute_preprint_patterns ( current_institute_preprint_classifications , current_institute_numerations , preprint_reference_search_regexp_patterns , standardised_preprint_reference_categories , kb_line_num ) return ( preprint_reference_search_regexp_patterns , standardised_preprint_reference_categories ) | Given the path to a knowledge base file containing the details of institutes and the patterns that their preprint report numbering schemes take create a dictionary of regexp search patterns to recognise these preprint references in reference lines and a dictionary of replacements for non - standard preprint categories in these references . |
47,760 | def _cmp_bystrlen_reverse ( a , b ) : if len ( a ) > len ( b ) : return - 1 elif len ( a ) < len ( b ) : return 1 else : return 0 | A private cmp function to be used by the sort function of a list when ordering the titles found in a knowledge base by string - length - LONGEST - > SHORTEST . |
47,761 | def build_special_journals_kb ( fpath ) : journals = set ( ) with file_resolving ( fpath ) as fh : for line in fh : if line . startswith ( '#' ) : continue if not line . strip ( ) : continue journals . add ( line . strip ( ) ) return journals | Load special journals database from file |
47,762 | def build_journals_re_kb ( fpath ) : def make_tuple ( match ) : regexp = match . group ( 'seek' ) repl = match . group ( 'repl' ) return regexp , repl kb = [ ] with file_resolving ( fpath ) as fh : for rawline in fh : if rawline . startswith ( '#' ) : continue m_kb_line = re_kb_line . search ( rawline ) kb . append ( make_tuple ( m_kb_line ) ) return kb | Load journals regexps knowledge base |
47,763 | def _parse_content_type ( content_type : Optional [ str ] ) -> Tuple [ Optional [ str ] , str ] : if not content_type : return None , "utf-8" else : type_ , parameters = cgi . parse_header ( content_type ) encoding = parameters . get ( "charset" , "utf-8" ) return type_ , encoding | Tease out the content - type and character encoding . |
47,764 | def _decode_body ( content_type : Optional [ str ] , body : bytes , * , strict : bool = False ) -> Any : type_ , encoding = _parse_content_type ( content_type ) if not len ( body ) or not content_type : return None decoded_body = body . decode ( encoding ) if type_ == "application/json" : return json . loads ( decoded_body ) elif type_ == "application/x-www-form-urlencoded" : return json . loads ( urllib . parse . parse_qs ( decoded_body ) [ "payload" ] [ 0 ] ) elif strict : raise ValueError ( f"unrecognized content type: {type_!r}" ) return decoded_body | Decode an HTTP body based on the specified content type . |
47,765 | def validate_event ( payload : bytes , * , signature : str , secret : str ) -> None : signature_prefix = "sha1=" if not signature . startswith ( signature_prefix ) : raise ValidationFailure ( "signature does not start with " f"{repr(signature_prefix)}" ) hmac_ = hmac . new ( secret . encode ( "UTF-8" ) , msg = payload , digestmod = "sha1" ) calculated_sig = signature_prefix + hmac_ . hexdigest ( ) if not hmac . compare_digest ( signature , calculated_sig ) : raise ValidationFailure ( "payload's signature does not align " "with the secret" ) | Validate the signature of a webhook event . |
47,766 | def accept_format ( * , version : str = "v3" , media : Optional [ str ] = None , json : bool = True ) -> str : accept = f"application/vnd.github.{version}" if media is not None : accept += f".{media}" if json : accept += "+json" return accept | Construct the specification of the format that a request should return . |
47,767 | def create_headers ( requester : str , * , accept : str = accept_format ( ) , oauth_token : Optional [ str ] = None , jwt : Optional [ str ] = None ) -> Dict [ str , str ] : if oauth_token is not None and jwt is not None : raise ValueError ( "Cannot pass both oauth_token and jwt." ) headers = { "user-agent" : requester , "accept" : accept } if oauth_token is not None : headers [ "authorization" ] = f"token {oauth_token}" elif jwt is not None : headers [ "authorization" ] = f"bearer {jwt}" return headers | Create a dict representing GitHub - specific header fields . |
47,768 | def decipher_response ( status_code : int , headers : Mapping [ str , str ] , body : bytes ) -> Tuple [ Any , Optional [ RateLimit ] , Optional [ str ] ] : data = _decode_body ( headers . get ( "content-type" ) , body ) if status_code in { 200 , 201 , 204 } : return data , RateLimit . from_http ( headers ) , _next_link ( headers . get ( "link" ) ) else : try : message = data [ "message" ] except ( TypeError , KeyError ) : message = None exc_type : Type [ HTTPException ] if status_code >= 500 : exc_type = GitHubBroken elif status_code >= 400 : exc_type = BadRequest if status_code == 403 : rate_limit = RateLimit . from_http ( headers ) if rate_limit and not rate_limit . remaining : raise RateLimitExceeded ( rate_limit , message ) elif status_code == 422 : errors = data . get ( "errors" , None ) if errors : fields = ", " . join ( repr ( e [ "field" ] ) for e in errors ) message = f"{message} for {fields}" else : message = data [ "message" ] raise InvalidField ( errors , message ) elif status_code >= 300 : exc_type = RedirectionException else : exc_type = HTTPException status_code_enum = http . HTTPStatus ( status_code ) args : Union [ Tuple [ http . HTTPStatus , str ] , Tuple [ http . HTTPStatus ] ] if message : args = status_code_enum , message else : args = status_code_enum , raise exc_type ( * args ) | Decipher an HTTP response for a GitHub API request . |
47,769 | def format_url ( url : str , url_vars : Mapping [ str , Any ] ) -> str : url = urllib . parse . urljoin ( DOMAIN , url ) expanded_url : str = uritemplate . expand ( url , var_dict = url_vars ) return expanded_url | Construct a URL for the GitHub API . |
47,770 | def from_http ( cls , headers : Mapping [ str , str ] , body : bytes , * , secret : Optional [ str ] = None ) -> "Event" : if "x-hub-signature" in headers : if secret is None : raise ValidationFailure ( "secret not provided" ) validate_event ( body , signature = headers [ "x-hub-signature" ] , secret = secret ) elif secret is not None : raise ValidationFailure ( "signature is missing" ) try : data = _decode_body ( headers [ "content-type" ] , body , strict = True ) except ( KeyError , ValueError ) as exc : raise BadRequest ( http . HTTPStatus ( 415 ) , "expected a content-type of " "'application/json' or " "'application/x-www-form-urlencoded'" ) from exc return cls ( data , event = headers [ "x-github-event" ] , delivery_id = headers [ "x-github-delivery" ] ) | Construct an event from HTTP headers and JSON body data . |
47,771 | def from_http ( cls , headers : Mapping [ str , str ] ) -> Optional [ "RateLimit" ] : try : limit = int ( headers [ "x-ratelimit-limit" ] ) remaining = int ( headers [ "x-ratelimit-remaining" ] ) reset_epoch = float ( headers [ "x-ratelimit-reset" ] ) except KeyError : return None else : return cls ( limit = limit , remaining = remaining , reset_epoch = reset_epoch ) | Gather rate limit information from HTTP headers . |
47,772 | def add ( self , func : AsyncCallback , event_type : str , ** data_detail : Any ) -> None : if len ( data_detail ) > 1 : msg = ( ) raise TypeError ( "dispatching based on data details is only " "supported up to one level deep; " f"{len(data_detail)} levels specified" ) elif not data_detail : callbacks = self . _shallow_routes . setdefault ( event_type , [ ] ) callbacks . append ( func ) else : data_key , data_value = data_detail . popitem ( ) data_details = self . _deep_routes . setdefault ( event_type , { } ) specific_detail = data_details . setdefault ( data_key , { } ) callbacks = specific_detail . setdefault ( data_value , [ ] ) callbacks . append ( func ) | Add a new route . |
47,773 | async def _make_request ( self , method : str , url : str , url_vars : Dict [ str , str ] , data : Any , accept : str , jwt : Opt [ str ] = None , oauth_token : Opt [ str ] = None , ) -> Tuple [ bytes , Opt [ str ] ] : if oauth_token is not None and jwt is not None : raise ValueError ( "Cannot pass both oauth_token and jwt." ) filled_url = sansio . format_url ( url , url_vars ) if jwt is not None : request_headers = sansio . create_headers ( self . requester , accept = accept , jwt = jwt ) elif oauth_token is not None : request_headers = sansio . create_headers ( self . requester , accept = accept , oauth_token = oauth_token ) else : request_headers = sansio . create_headers ( self . requester , accept = accept , oauth_token = self . oauth_token ) cached = cacheable = False if data == b"" : body = b"" request_headers [ "content-length" ] = "0" if method == "GET" and self . _cache is not None : cacheable = True try : etag , last_modified , data , more = self . _cache [ filled_url ] cached = True except KeyError : pass else : if etag is not None : request_headers [ "if-none-match" ] = etag if last_modified is not None : request_headers [ "if-modified-since" ] = last_modified else : charset = "utf-8" body = json . dumps ( data ) . encode ( charset ) request_headers [ 'content-type' ] = f"application/json; charset={charset}" request_headers [ 'content-length' ] = str ( len ( body ) ) if self . rate_limit is not None : self . rate_limit . remaining -= 1 response = await self . _request ( method , filled_url , request_headers , body ) if not ( response [ 0 ] == 304 and cached ) : data , self . rate_limit , more = sansio . decipher_response ( * response ) has_cache_details = ( "etag" in response [ 1 ] or "last-modified" in response [ 1 ] ) if self . _cache is not None and cacheable and has_cache_details : etag = response [ 1 ] . get ( "etag" ) last_modified = response [ 1 ] . get ( "last-modified" ) self . 
_cache [ filled_url ] = etag , last_modified , data , more return data , more | Construct and make an HTTP request . |
47,774 | async def getitem ( self , url : str , url_vars : Dict [ str , str ] = { } , * , accept : str = sansio . accept_format ( ) , jwt : Opt [ str ] = None , oauth_token : Opt [ str ] = None ) -> Any : data , _ = await self . _make_request ( "GET" , url , url_vars , b"" , accept , jwt = jwt , oauth_token = oauth_token ) return data | Send a GET request for a single item to the specified endpoint . |
47,775 | async def getiter ( self , url : str , url_vars : Dict [ str , str ] = { } , * , accept : str = sansio . accept_format ( ) , jwt : Opt [ str ] = None , oauth_token : Opt [ str ] = None ) -> AsyncGenerator [ Any , None ] : data , more = await self . _make_request ( "GET" , url , url_vars , b"" , accept , jwt = jwt , oauth_token = oauth_token ) if isinstance ( data , dict ) and "items" in data : data = data [ "items" ] for item in data : yield item if more : async for item in self . getiter ( more , url_vars , accept = accept , jwt = jwt , oauth_token = oauth_token ) : yield item | Return an async iterable for all the items at a specified endpoint . |
47,776 | def quantize ( self , image ) : if get_cKDTree ( ) : return self . quantize_with_scipy ( image ) else : print ( 'Scipy not available, falling back to slower version.' ) return self . quantize_without_scipy ( image ) | Use a kdtree to quickly find the closest palette colors for the pixels |
47,777 | def inxsearch ( self , r , g , b ) : dists = ( self . colormap [ : , : 3 ] - np . array ( [ r , g , b ] ) ) a = np . argmin ( ( dists * dists ) . sum ( 1 ) ) return a | Search for BGR values 0 .. 255 and return colour index |
47,778 | def gen_filename ( endpoint ) : now = datetime . now ( ) . strftime ( '%Y%m%d_%H%M%S%f' ) [ : - 4 ] base = endpoint . split ( '://' , 1 ) [ 1 ] if base . startswith ( 'localhost:' ) : base = gethostname ( ) . split ( '.' ) [ 0 ] + base [ 9 : ] base = base . replace ( ':' , '_' ) . replace ( '/' , '_' ) return '{}_{}.h5' . format ( base , now ) | Generate a filename from endpoint with timestamp . |
47,779 | def dict_to_hdf5 ( dic , endpoint ) : filename = gen_filename ( endpoint ) with h5py . File ( filename , 'w' ) as handler : walk_dict_to_hdf5 ( dic , handler ) print ( 'dumped to' , filename ) | Dump a dict to an HDF5 file . |
47,780 | def hdf5_to_dict ( filepath , group = '/' ) : if not h5py . is_hdf5 ( filepath ) : raise RuntimeError ( filepath , 'is not a valid HDF5 file.' ) with h5py . File ( filepath , 'r' ) as handler : dic = walk_hdf5_to_dict ( handler [ group ] ) return dic | load the content of an hdf5 file to a dict . |
47,781 | def print_one_train ( client , verbosity = 0 ) : ts_before = time ( ) data , meta = client . next ( ) ts_after = time ( ) if not data : print ( "Empty data" ) return train_id = list ( meta . values ( ) ) [ 0 ] . get ( 'timestamp.tid' , 0 ) print ( "Train ID:" , train_id , "--------------------------" ) delta = ts_after - ts_before print ( 'Data from {} sources, REQ-REP took {:.2f} ms' . format ( len ( data ) , delta ) ) print ( ) for i , ( source , src_data ) in enumerate ( sorted ( data . items ( ) ) , start = 1 ) : src_metadata = meta . get ( source , { } ) tid = src_metadata . get ( 'timestamp.tid' , 0 ) print ( "Source {}: {!r} @ {}" . format ( i , source , tid ) ) try : ts = src_metadata [ 'timestamp' ] except KeyError : print ( "No timestamp" ) else : dt = strftime ( '%Y-%m-%d %H:%M:%S' , localtime ( ts ) ) delay = ( ts_after - ts ) * 1000 print ( 'timestamp: {} ({}) | delay: {:.2f} ms' . format ( dt , ts , delay ) ) if verbosity < 1 : print ( "- data:" , sorted ( src_data ) ) print ( "- metadata:" , sorted ( src_metadata ) ) else : print ( 'data:' ) pretty_print ( src_data , verbosity = verbosity - 1 ) if src_metadata : print ( 'metadata:' ) pretty_print ( src_metadata ) print ( ) return data , meta | Retrieve data for one train and print it . |
47,782 | def pretty_print ( d , ind = '' , verbosity = 0 ) : assert isinstance ( d , dict ) for k , v in sorted ( d . items ( ) ) : str_base = '{} - [{}] {}' . format ( ind , type ( v ) . __name__ , k ) if isinstance ( v , dict ) : print ( str_base . replace ( '-' , '+' , 1 ) ) pretty_print ( v , ind = ind + ' ' , verbosity = verbosity ) continue elif isinstance ( v , np . ndarray ) : node = '{}, {}, {}' . format ( str_base , v . dtype , v . shape ) if verbosity >= 2 : node += '\n{}' . format ( v ) elif isinstance ( v , Sequence ) : if v and isinstance ( v , ( list , tuple ) ) : itemtype = ' of ' + type ( v [ 0 ] ) . __name__ pos = str_base . find ( ']' ) str_base = str_base [ : pos ] + itemtype + str_base [ pos : ] node = '{}, {}' . format ( str_base , v ) if verbosity < 1 and len ( node ) > 80 : node = node [ : 77 ] + '...' else : node = '{}, {}' . format ( str_base , v ) print ( node ) | Pretty print a data dictionary from the bridge client |
47,783 | def start_gen ( port , ser = 'msgpack' , version = '2.2' , detector = 'AGIPD' , raw = False , nsources = 1 , datagen = 'random' , * , debug = True ) : context = zmq . Context ( ) socket = context . socket ( zmq . REP ) socket . setsockopt ( zmq . LINGER , 0 ) socket . bind ( 'tcp://*:{}' . format ( port ) ) if ser != 'msgpack' : raise ValueError ( "Unknown serialisation format %s" % ser ) serialize = partial ( msgpack . dumps , use_bin_type = True ) det = Detector . getDetector ( detector , raw = raw , gen = datagen ) generator = generate ( det , nsources ) print ( 'Simulated Karabo-bridge server started on:\ntcp://{}:{}' . format ( uname ( ) . nodename , port ) ) t_prev = time ( ) n = 0 try : while True : msg = socket . recv ( ) if msg == b'next' : train = next ( generator ) msg = containize ( train , ser , serialize , version ) socket . send_multipart ( msg , copy = False ) if debug : print ( 'Server : emitted train:' , train [ 1 ] [ list ( train [ 1 ] . keys ( ) ) [ 0 ] ] [ 'timestamp.tid' ] ) n += 1 if n % TIMING_INTERVAL == 0 : t_now = time ( ) print ( 'Sent {} trains in {:.2f} seconds ({:.2f} Hz)' '' . format ( TIMING_INTERVAL , t_now - t_prev , TIMING_INTERVAL / ( t_now - t_prev ) ) ) t_prev = t_now else : print ( 'wrong request' ) break except KeyboardInterrupt : print ( '\nStopped.' ) finally : socket . close ( ) context . destroy ( ) | Karabo bridge server simulation . |
47,784 | def next ( self ) : if self . _pattern == zmq . REQ and not self . _recv_ready : self . _socket . send ( b'next' ) self . _recv_ready = True try : msg = self . _socket . recv_multipart ( copy = False ) except zmq . error . Again : raise TimeoutError ( 'No data received from {} in the last {} ms' . format ( self . _socket . getsockopt_string ( zmq . LAST_ENDPOINT ) , self . _socket . getsockopt ( zmq . RCVTIMEO ) ) ) self . _recv_ready = False return self . _deserialize ( msg ) | Request next data container . |
47,785 | def zopen ( filename , * args , ** kwargs ) : if Path is not None and isinstance ( filename , Path ) : filename = str ( filename ) name , ext = os . path . splitext ( filename ) ext = ext . upper ( ) if ext == ".BZ2" : if PY_VERSION [ 0 ] >= 3 : return bz2 . open ( filename , * args , ** kwargs ) else : args = list ( args ) if len ( args ) > 0 : args [ 0 ] = "" . join ( [ c for c in args [ 0 ] if c != "t" ] ) if "mode" in kwargs : kwargs [ "mode" ] = "" . join ( [ c for c in kwargs [ "mode" ] if c != "t" ] ) return bz2 . BZ2File ( filename , * args , ** kwargs ) elif ext in ( ".GZ" , ".Z" ) : return gzip . open ( filename , * args , ** kwargs ) else : return io . open ( filename , * args , ** kwargs ) | This function wraps around the bz2 gzip and standard python s open function to deal intelligently with bzipped gzipped or standard text files . |
47,786 | def reverse_readline ( m_file , blk_size = 4096 , max_mem = 4000000 ) : is_text = isinstance ( m_file , io . TextIOWrapper ) try : file_size = os . path . getsize ( m_file . name ) except AttributeError : file_size = max_mem + 1 if file_size < max_mem or isinstance ( m_file , gzip . GzipFile ) : for line in reversed ( m_file . readlines ( ) ) : yield line . rstrip ( ) else : if isinstance ( m_file , bz2 . BZ2File ) : blk_size = min ( max_mem , file_size ) buf = "" m_file . seek ( 0 , 2 ) if is_text : lastchar = m_file . read ( 1 ) else : lastchar = m_file . read ( 1 ) . decode ( "utf-8" ) trailing_newline = ( lastchar == "\n" ) while 1 : newline_pos = buf . rfind ( "\n" ) pos = m_file . tell ( ) if newline_pos != - 1 : line = buf [ newline_pos + 1 : ] buf = buf [ : newline_pos ] if pos or newline_pos or trailing_newline : line += "\n" yield line elif pos : toread = min ( blk_size , pos ) m_file . seek ( pos - toread , 0 ) if is_text : buf = m_file . read ( toread ) + buf else : buf = m_file . read ( toread ) . decode ( "utf-8" ) + buf m_file . seek ( pos - toread , 0 ) if pos == toread : buf = "\n" + buf else : return | Generator method to read a file line - by - line but backwards . This allows one to efficiently get data at the end of a file . |
def get_open_fds():
    """Return the number of open file descriptors for the current process.

    Shells out to ``lsof``, so this only works on systems where lsof is
    installed and on PATH.
    """
    out = subprocess.check_output(
        ["lsof", "-w", "-Ff", "-p", str(os.getpid())]
    ).decode("utf-8")
    count = 0
    for token in out.split("\n"):
        # ``lsof -Ff`` prefixes each descriptor with "f"; count only the
        # numeric ones (skipping entries like "fcwd" or "ftxt").
        if token.startswith("f") and token[1:].isdigit():
            count += 1
    return count
def acquire(self):
    """Acquire the lock, if possible.

    If the lock file already exists, retry every ``self.delay`` seconds
    until the lock is obtained or ``self.timeout`` seconds have elapsed.

    Raises:
        FileLockException: If the lock could not be acquired within
            ``self.timeout`` seconds.
    """
    started = time.time()
    while True:
        try:
            # O_CREAT | O_EXCL makes the creation atomic: it fails with
            # EEXIST when another process already holds the lock file.
            self.fd = os.open(self.lockfile,
                              os.O_CREAT | os.O_EXCL | os.O_RDWR)
            break
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise
            if time.time() - started >= self.timeout:
                raise FileLockException("%s: Timeout occured." % self.lockfile)
            time.sleep(self.delay)
    self.is_locked = True
def release(self):
    """Release the lock by closing and deleting the lock file.

    A no-op when the lock is not held. When used in a ``with`` statement
    this is called automatically on exit.
    """
    if not self.is_locked:
        return
    os.close(self.fd)
    os.unlink(self.lockfile)
    self.is_locked = False
def filter(self, names):
    """Filter *names*, keeping only those matching one of ``self.pats``.

    Args:
        names: A string or list of strings (normalized via list_strings).

    Returns:
        list: Names matching at least one wildcard pattern, in their
        original order, each at most once. (The previous implementation
        appended a name once per matching pattern, so a name matching
        several patterns was duplicated in the result.)
    """
    names = list_strings(names)
    return [f for f in names
            if any(fnmatch.fnmatch(f, pat) for pat in self.pats)]
def match(self, name):
    """Return True if *name* matches at least one pattern in ``self.pats``."""
    return any(fnmatch.fnmatch(name, pat) for pat in self.pats)
def deprecated(replacement=None, message=None):
    """Decorator to mark classes or functions as deprecated.

    Each call to the decorated object emits a DeprecationWarning, with an
    optional pointer to a replacement.

    Args:
        replacement: The object to use instead; may be a plain callable,
            or a property/classmethod/staticmethod wrapper, whose name and
            module are mentioned in the warning.
        message: Extra text appended to the warning message.

    Returns:
        The decorating function.
    """
    def wrap(old):
        # functools.wraps preserves old's __name__/__doc__/etc. on the
        # wrapper (the original implementation lost them).
        @wraps(old)
        def wrapped(*args, **kwargs):
            msg = "%s is deprecated" % old.__name__
            if replacement is not None:
                # Unwrap descriptors to report the underlying callable.
                if isinstance(replacement, property):
                    r = replacement.fget
                elif isinstance(replacement, (classmethod, staticmethod)):
                    r = replacement.__func__
                else:
                    r = replacement
                msg += "; use %s in %s instead." % (r.__name__, r.__module__)
            if message is not None:
                msg += "\n" + message
            # Make sure DeprecationWarnings are not silently filtered out.
            warnings.simplefilter('default')
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            return old(*args, **kwargs)
        return wrapped
    return wrap
def install_excepthook(hook_type="color", **kwargs):
    """Replace the default traceback with an improved one from IPython.

    Use "color" for colourful traceback formatting, "verbose" for
    Ka-Ping Yee's cgitb.py version. kwargs are passed to the hook
    constructor; see IPython.core.ultratb for details.

    Returns:
        int: 0 on success, 1 if IPython is unavailable, 2 if *hook_type*
        is not recognized.
    """
    try:
        from IPython.core import ultratb
    except ImportError:
        import warnings
        warnings.warn("Cannot install excepthook, IPyhon.core.ultratb not available")
        return 1
    hooks = {"color": ultratb.ColorTB, "verbose": ultratb.VerboseTB}
    hook = hooks.get(hook_type.lower())
    if hook is None:
        return 2
    import sys
    sys.excepthook = hook(**kwargs)
    return 0
def regrep(filename, patterns, reverse=False, terminate_on_match=False,
           postprocess=str):
    """A powerful regular expression version of grep.

    Args:
        filename: Name of the file to grep.
        patterns: Dict mapping a key to a regex pattern string.
        reverse: Read the file back to front (useful when the relevant
            matches are near the end of a large file).
        terminate_on_match: Stop scanning as soon as every pattern has at
            least one match.
        postprocess: Callable applied to each captured group (e.g. int or
            float). Defaults to str.

    Returns:
        dict: {key: [[processed_groups, line_index], ...]}. Line indices
        are negative (counted from the end) when reverse=True.
    """
    compiled = {k: re.compile(v) for k, v in patterns.items()}
    matches = collections.defaultdict(list)
    gen = reverse_readfile(filename) if reverse else zopen(filename, "rt")
    for i, l in enumerate(gen):
        for k, p in compiled.items():
            m = p.search(l)
            if m:
                matches[k].append([[postprocess(g) for g in m.groups()],
                                   -i if reverse else i])
        if terminate_on_match and all(
                len(matches.get(k, [])) for k in compiled):
            break
    try:
        gen.close()
    except Exception:
        # reverse_readfile may hand back an object without a usable
        # close(); best-effort cleanup. Narrowed from a bare ``except:``
        # so KeyboardInterrupt/SystemExit are no longer swallowed.
        pass
    return matches
def cached_class(klass):
    """Decorator to cache class instances by constructor arguments.

    This results in a class that behaves like a singleton for each set of
    constructor arguments, ensuring efficiency. Because instances are
    shared, mutating a cached instance is visible to every caller that
    used the same constructor arguments.
    """
    # Maps (cls, *args, tuple(kwargs.items())) -> cached instance.
    cache = {}

    @wraps(klass, assigned=("__name__", "__module__"), updated=())
    class _decorated(klass):
        __doc__ = klass.__doc__

        def __new__(cls, *args, **kwargs):
            key = (cls,) + args + tuple(kwargs.items())
            try:
                inst = cache.get(key, None)
            except TypeError:
                # Unhashable constructor arguments: skip caching entirely.
                inst = key = None
            if inst is None:
                # Build via the *original* class, then rebrand so that
                # isinstance checks see the decorated class.
                inst = klass(*args, **kwargs)
                inst.__class__ = cls
                if key is not None:
                    cache[key] = inst
            return inst

        def __init__(self, *args, **kwargs):
            # Deliberately a no-op: klass.__init__ already ran inside
            # __new__, and Python calls __init__ again on every
            # construction — including cache hits, which must not be
            # re-initialized.
            pass

    return _decorated
def operator_from_str(op):
    """Return the ``operator``-module callable associated with string *op*.

    Supported strings: comparison ("==", "!=", ">", ">=", "<", "<="),
    arithmetic ("+", "-", "*", "/", "%") and "^" (xor).

    Raises:
        KeyError: If *op* is not a supported operator string.
    """
    ops = {
        "==": operator.eq,
        "!=": operator.ne,
        ">": operator.gt,
        ">=": operator.ge,
        "<": operator.lt,
        "<=": operator.le,
        "+": operator.add,
        "-": operator.sub,
        "*": operator.mul,
        "%": operator.mod,
        "^": operator.xor,
    }
    try:
        # Registered separately: truediv may be missing on very old
        # interpreters (mirrors the original guard).
        ops["/"] = operator.truediv
    except AttributeError:
        pass
    return ops[op]
def run(self, timeout=None, **kwargs):
    """Run ``self.command`` in a separate thread and wait *timeout* seconds.

    If the process is still alive after the timeout it is terminated and
    ``self.killed`` is set.

    Args:
        timeout: Seconds to wait for completion (None = wait forever).
        **kwargs: Keyword arguments passed to subprocess.Popen. stdout
            and stderr default to PIPE.

    Returns:
        self, with .retcode, .output and .error populated.
    """
    from subprocess import Popen, PIPE

    def target(**kw):
        try:
            self.process = Popen(self.command, **kw)
            self.output, self.error = self.process.communicate()
            self.retcode = self.process.returncode
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt are not swallowed inside the thread.
            import traceback
            self.error = traceback.format_exc()
            self.retcode = -1

    kwargs.setdefault('stdout', PIPE)
    kwargs.setdefault('stderr', PIPE)

    import threading
    thread = threading.Thread(target=target, kwargs=kwargs)
    thread.start()
    thread.join(timeout)
    if thread.is_alive():
        self.process.terminate()
        self.killed = True
        thread.join()
    return self
def marquee(text="", width=78, mark='*'):
    """Return *text* centered in a marquee of *mark* characters.

    Args:
        text: The message; an empty string yields a solid bar.
        width: Total target width of the marquee.
        mark: The string repeated on each side of the text.
    """
    if not text:
        # No text: a solid bar trimmed to exactly `width` characters.
        return (mark * width)[:width]
    pad_count = (width - len(text) - 2) // len(mark) // 2
    side = mark * max(pad_count, 0)
    return '%s %s %s' % (side, text, side)
def boxed(msg, ch="=", pad=5):
    """Return *msg* rendered inside a box drawn with character *ch*.

    Args:
        msg: The message to box.
        ch: Character used for the top/bottom border and side padding.
        pad: Number of *ch* characters placed on each side of the
            (stripped) message; 0 disables side padding entirely.
    """
    if pad > 0:
        side = pad * ch
        msg = "%s %s %s" % (side, msg.strip(), side)
    border = len(msg) * ch
    return "\n".join([border, msg, border])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.