idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
235,600
def get_fields(config):
    """Recursively yield every non-empty field name found in the config file."""
    scraping = config['scraping']
    for entry in scraping['data']:
        name = entry['field']
        if name != '':
            yield name
    # Descend into any nested 'next' scraping configs.
    for nested in scraping.get('next', ()):
        for field in get_fields(nested):
            yield field
Recursive generator that yields the field names in the config file
85
12
235,601
def extract_fieldnames(config):
    """Return a list of unique field names from the config file.

    Duplicate field names get a numeric suffix ('name_2', 'name_3', ...).
    Bug fix: the original used ``fields.count(x) + 1``, which counts only
    exact (unsuffixed) occurrences, so a third duplicate was given '_2'
    again and the result was not unique.  A per-name counter fixes this.
    """
    fields = []
    seen = {}  # base field name -> number of occurrences so far
    for name in get_fields(config):
        count = seen.get(name, 0) + 1
        seen[name] = count
        # First occurrence keeps the bare name; later ones are suffixed.
        fields.append(name if count == 1 else '%s_%d' % (name, count))
    return fields
Function to return a list of unique field names from the config file
60
13
235,602
def run(self, dag: DAGCircuit) -> DAGCircuit:
    """Run one pass of optimisation on the circuit and route for the given backend."""
    tk_circ = dagcircuit_to_tk(dag, _DROP_CONDS=self.DROP_CONDS,
                               _BOX_UNKNOWN=self.BOX_UNKNOWN)
    tk_circ, layout = self.process_circ(tk_circ)
    out_dag = tk_to_dagcircuit(tk_circ)
    out_dag.name = dag.name
    # Record the final qubit layout as a {('q', i): ('q', layout[i])} map.
    out_dag.final_layout = {('q', pos): ('q', target)
                            for pos, target in enumerate(layout)}
    return out_dag
Run one pass of optimisation on the circuit and route for the given backend .
151
16
235,603
def _sort_row_col(qubits: Iterator[GridQubit]) -> List[GridQubit]:
    """Sort grid qubits first by row, then by column."""
    def row_col_key(qubit):
        return (qubit.row, qubit.col)

    return sorted(qubits, key=row_col_key)
Sort grid qubits first by row then by column
50
10
235,604
def print_setting(self) -> str:
    """Present the QSE settings as a formatted multi-line string."""
    separator = "===============================================================\n"
    pieces = [
        "\n",
        "==================== Setting of {} ============================\n".format(
            self.configuration['name']),
        "{}".format(self.setting),
        separator,
        "{}".format(self._var_form.setting),
        separator,
    ]
    return "".join(pieces)
Presents the QSE settings as a string .
90
10
235,605
def _energy_evaluation(self, operator):
    """Evaluate the energy of the current input circuit w.r.t. *operator*.

    Returns a ``(mean, std)`` pair of real energies.
    """
    # Prefer an explicitly prepared quantum state; otherwise fall back to
    # the optimised circuit wrapped in a one-element list.
    state = self._quantum_state
    circuits = state if state is not None else [self.opt_circuit]
    if not operator._paulis:
        mean, std = 0.0, 0.0
    else:
        mean, std = operator.evaluate_with_result(
            self._operator_mode, circuits,
            self._quantum_instance.backend, self.ret)
    operator.disable_summarize_circuits()
    logger.debug('Energy evaluation {} returned {}'.format(
        self._eval_count, np.real(mean)))
    return np.real(mean), np.real(std)
Evaluate the energy of the current input circuit with respect to the given operator .
178
17
235,606
def _run(self) -> dict:
    """Run the QSE algorithm to compute the eigenvalues of the Hamiltonian."""
    if not self._quantum_instance.is_statevector:
        raise AquaError("Can only calculate state for QSE with statevector backends")
    # Execute once and cache the raw result for later energy evaluations.
    self.ret = self._quantum_instance.execute(self.opt_circuit)
    self._eval_count = 0
    self._solve()
    self._ret['eval_count'] = self._eval_count
    self._ret['eval_time'] = self._eval_time
    return self._ret
Runs the QSE algorithm to compute the eigenvalues of the Hamiltonian .
123
17
235,607
def whooshee_search(self, search_string, group=whoosh.qparser.OrGroup, whoosheer=None, match_substrings=True, limit=None, order_by_relevance=10):
    """Do a fulltext search on the query.

    Returns a query filtered with results of the fulltext search, optionally
    ordered by relevance (`order_by_relevance` > 0 orders that many leading
    rows, < 0 orders all rows, 0 leaves the order unchanged).  When no
    `whoosheer` is given, it is deduced from the entities in the query/joins.
    """
    if not whoosheer:
        ### inspiration taken from flask-WhooshAlchemy
        # find out all entities in join
        entities = set()
        # directly queried entities
        for cd in self.column_descriptions:
            entities.add(cd['type'])
        # joined entities
        if self._join_entities and isinstance(self._join_entities[0], Mapper):
            # SQLAlchemy >= 0.8.0
            entities.update(set([x.entity for x in self._join_entities]))
        else:
            # SQLAlchemy < 0.8.0
            entities.update(set(self._join_entities))
        # make sure we can work with aliased entities
        unaliased = set()
        for entity in entities:
            if isinstance(entity, (AliasedClass, AliasedInsp)):
                unaliased.add(inspect(entity).mapper.class_)
            else:
                unaliased.add(entity)
        # Pick the registered whoosheer whose model set matches exactly.
        whoosheer = next(w for w in _get_config(self)['whoosheers']
                         if set(w.models) == unaliased)
    # TODO what if unique field doesn't exist or there are multiple?
    for fname, field in list(whoosheer.schema._fields.items()):
        if field.unique:
            uniq = fname
    # TODO: use something more general than id
    res = whoosheer.search(search_string=search_string, values_of=uniq,
                           group=group, match_substrings=match_substrings,
                           limit=limit)
    if not res:
        # No hits: return a query that matches nothing.
        return self.filter(text('null'))
    # transform unique field name into model attribute field
    attr = None
    if hasattr(whoosheer, '_is_model_whoosheer'):
        attr = getattr(whoosheer.models[0], uniq)
    else:
        # non-model whoosheers must have unique field named
        # model.__name__.lower + '_' + attr
        for m in whoosheer.models:
            if m.__name__.lower() == uniq.split('_')[0]:
                attr = getattr(m, uniq.split('_')[1])
    search_query = self.filter(attr.in_(res))
    if order_by_relevance < 0:
        # we want all returned rows ordered
        search_query = search_query.order_by(sqlalchemy.sql.expression.case(
            [(attr == uniq_val, index) for index, uniq_val in enumerate(res)],))
    elif order_by_relevance > 0:
        # we want only number of specified rows ordered
        search_query = search_query.order_by(sqlalchemy.sql.expression.case(
            [(attr == uniq_val, index) for index, uniq_val in enumerate(res)
             if index < order_by_relevance],
            else_=order_by_relevance))
    else:
        # no ordering
        pass
    return search_query
Do a fulltext search on the query . Returns a query filtered with results of the fulltext search .
746
21
235,608
def search(cls, search_string, values_of='', group=whoosh.qparser.OrGroup, match_substrings=True, limit=None):
    """Search the indexed fields for *search_string*.

    Returns the whoosh results, or — when *values_of* names a column —
    just that column's values for each hit.
    """
    index = Whooshee.get_or_create_index(_get_app(cls), cls)
    prepped = cls.prep_search_string(search_string, match_substrings)
    with index.searcher() as searcher:
        parser = whoosh.qparser.MultifieldParser(
            cls.schema.names(), index.schema, group=group)
        results = searcher.search(parser.parse(prepped), limit=limit)
        if values_of:
            return [hit[values_of] for hit in results]
        return results
Searches the fields for given search_string . Returns the found records if values_of is left empty else the values of the given columns .
173
30
235,609
def create_index(cls, app, wh):
    """Create and open an index for the given whoosheer and app.

    If the index already exists it is simply opened; otherwise it is
    created first (in RAM when memory storage is configured).
    """
    # TODO: do we really want/need to use camel casing?
    # everywhere else, there is just .lower()
    ext = app.extensions['whooshee']
    if ext['memory_storage']:
        storage = RamStorage()
        index = storage.create_index(wh.schema)
        assert index
        return index
    subdir = getattr(wh, 'index_subdir', cls.camel_to_snake(wh.__name__))
    index_path = os.path.join(ext['index_path_root'], subdir)
    if whoosh.index.exists_in(index_path):
        return whoosh.index.open_dir(index_path)
    if not os.path.exists(index_path):
        os.makedirs(index_path)
    return whoosh.index.create_in(index_path, wh.schema)
Creates and opens an index for the given whoosheer and app . If the index already exists it just opens it otherwise it creates it first .
217
31
235,610
def get_or_create_index(cls, app, wh):
    """Return a cached index for (app, wh), creating and caching it on first use."""
    cache = app.extensions['whooshee']['whoosheers_indexes']
    if wh not in cache:
        cache[wh] = cls.create_index(app, wh)
    return cache[wh]
Gets a previously cached index or creates a new one for the given app and whoosheer .
108
21
235,611
def on_commit(self, changes):
    """React to committed model changes by updating the relevant indexes.

    ``changes`` appears to be an iterable of ``(instance, change_kind)``
    pairs — the kind string is combined with the model name to find the
    whoosheer's handler method (e.g. ``update_user``).  A writer is
    created lazily, only when at least one handler exists, and committed
    once per whoosheer.
    """
    # Indexing can be disabled globally via config.
    if _get_config(self)['enable_indexing'] is False:
        return None
    for wh in self.whoosheers:
        if not wh.auto_update:
            continue
        writer = None
        for change in changes:
            if change[0].__class__ in wh.models:
                method_name = '{0}_{1}'.format(change[1], change[0].__class__.__name__.lower())
                method = getattr(wh, method_name, None)
                if method:
                    if not writer:
                        # Lazily open a writer the first time a handler fires.
                        writer = type(self).get_or_create_index(_get_app(self), wh).writer(timeout=_get_config(self)['writer_timeout'])
                    method(writer, change[0])
        if writer:
            writer.commit()
Method that gets called when a model is changed . This serves to do the actual index writing .
191
19
235,612
def reindex(self):
    """Rebuild every whoosheer's index from all rows currently in the DB."""
    for wh in self.whoosheers:
        index = type(self).get_or_create_index(_get_app(self), wh)
        writer = index.writer(timeout=_get_config(self)['writer_timeout'])
        for model in wh.models:
            # Handler is named e.g. "<UPDATE_KWD>_user" per model.
            handler = getattr(wh, "{0}_{1}".format(UPDATE_KWD, model.__name__.lower()))
            for item in model.query.all():
                handler(writer, item)
        writer.commit()
Reindex all data
130
4
235,613
def dump_info():
    """Log various details about the account & servers."""
    vultr = Vultr(API_KEY)
    try:
        # (label, listing-callable) pairs, logged in a fixed order.
        for label, lister in (
                ('account info', vultr.account.info),
                ('apps', vultr.app.list),
                ('backups', vultr.backup.list),
                ('DNS', vultr.dns.list),
                ('ISOs', vultr.iso.list),
                ('OSs', vultr.os.list),
                ('plans', vultr.plans.list),
                ('regions', vultr.regions.list),
                ('servers', vultr.server.list),
                ('snapshots', vultr.snapshot.list),
                ('SSH keys', vultr.sshkey.list),
                ('startup scripts', vultr.startupscript.list)):
            logging.info('Listing %s:\n%s', label, dumps(lister(), indent=2))
    except VultrError as ex:
        logging.error('VultrError: %s', ex)
Shows various details about the account & servers
425
9
235,614
def update_params(params, updates):
    """Return a copy of *params* (or a fresh dict) merged with *updates*."""
    merged = dict(params) if isinstance(params, dict) else {}
    merged.update(updates)
    return merged
Merges updates into params
37
5
235,615
def _request_get_helper(self, url, params=None):
    """API GET request helper; injects the API key when one is configured."""
    query = params if isinstance(params, dict) else {}
    if self.api_key:
        query['api_key'] = self.api_key
    return requests.get(url, params=query, timeout=60)
API GET request helper
69
4
235,616
def _request_post_helper(self, url, params=None):
    """API POST request helper.

    Sends the API key as a query parameter and *params* as the form body.
    Bug fix: previously ``query`` was only bound when ``self.api_key`` was
    truthy, so key-less requests raised ``NameError``; now an empty query
    dict is used in that case.
    """
    query = {'api_key': self.api_key} if self.api_key else {}
    return requests.post(url, params=query, data=params, timeout=60)
API POST helper
59
3
235,617
def _request_helper ( self , url , params , method ) : try : if method == 'POST' : return self . _request_post_helper ( url , params ) elif method == 'GET' : return self . _request_get_helper ( url , params ) raise VultrError ( 'Unsupported method %s' % method ) except requests . RequestException as ex : raise RuntimeError ( ex )
API request helper method
93
4
235,618
def halt_running():
    """Gracefully shut down all running servers.

    Bug fix: the original fell through to use ``serverList`` even when
    the listing call failed, raising ``NameError``; we now log the error
    and return early instead.
    """
    vultr = Vultr(API_KEY)
    try:
        server_list = vultr.server.list()
        #logging.info('Listing servers:\n%s', dumps(
        #serverList, indent=2
        #))
    except VultrError as ex:
        logging.error('VultrError: %s', ex)
        return
    for server_id in server_list:
        if server_list[server_id]['power_status'] == 'running':
            logging.info(server_list[server_id]['label'] + " will be gracefully shutdown.")
            vultr.server.halt(server_id)
Halts all running servers
143
5
235,619
def tag_arxiv(line):
    """Tag arXiv report numbers with <cds.REPORTNUMBER> markup."""
    def tagger(match):
        groups = match.groupdict()
        # A present suffix gets a leading space; a missing one disappears.
        if match.group('suffix'):
            groups['suffix'] = ' ' + groups['suffix']
        else:
            groups['suffix'] = ''
        return (u'<cds.REPORTNUMBER>arXiv:%(year)s'
                u'%(month)s.%(num)s%(suffix)s'
                u'</cds.REPORTNUMBER>') % groups

    # Most specific (5-digit) patterns are applied first.
    for pattern in (re_arxiv_5digits, re_arxiv,
                    re_new_arxiv_5digits, re_new_arxiv):
        line = pattern.sub(tagger, line)
    return line
Tag arxiv report numbers
195
6
235,620
def tag_arxiv_more(line):
    """Tag old-style arXiv report numbers (e.g. ``hep-th/9901001``)."""
    # Normalise "catch-up" style identifiers into suffix/yearmonthnum form first.
    line = RE_ARXIV_CATCHUP.sub(ur"\g<suffix>/\g<year>\g<month>\g<num>", line)
    for report_re, report_repl in RE_OLD_ARXIV:
        # Build the "<prefix>/<num>" replacement and wrap it in report-number tags.
        report_number = report_repl + ur"/\g<num>"
        line = report_re.sub(u'<cds.REPORTNUMBER>' + report_number + u'</cds.REPORTNUMBER>', line)
    return line
Tag old arxiv report numbers
128
7
235,621
def tag_pos_volume(line):
    """Tag 'Proceedings of Science' (PoS) volume references in *line*."""
    def tagger(match):
        groups = match.groupdict()
        try:
            year = match.group('year')
        except IndexError:
            # Extract year from volume name
            # which should always include the year
            found = re.search(re_pos_year_num, match.group('volume_num'), re.UNICODE)
            year = found.group(0)
        if year:
            groups['year'] = ' <cds.YR>(%s)</cds.YR>' % year.strip().strip('()')
        else:
            groups['year'] = ''
        return ('<cds.JOURNAL>PoS</cds.JOURNAL>'
                ' <cds.VOL>%(volume_name)s%(volume_num)s</cds.VOL>'
                '%(year)s'
                ' <cds.PG>%(page)s</cds.PG>') % groups

    for pattern in re_pos:
        line = pattern.sub(tagger, line)
    return line
Tag POS volume number
241
4
235,622
def find_numeration_more(line):
    """Look for other citation numeration in *line*.

    Returns a dict with keys year/series/volume/page/page_end/len for the
    first pattern that matches, or None when nothing matches.
    """
    # First, attempt to use marked-up titles
    patterns = (
        re_correct_numeration_2nd_try_ptn1,
        re_correct_numeration_2nd_try_ptn2,
        re_correct_numeration_2nd_try_ptn3,
        re_correct_numeration_2nd_try_ptn4,
    )
    for pattern in patterns:
        match = pattern.search(line)
        if match:
            info = match.groupdict()
            series = extract_series_from_volume(info['vol'])
            # Fall back through the alternative volume-number groups.
            if not info['vol_num']:
                info['vol_num'] = info['vol_num_alt']
            if not info['vol_num']:
                info['vol_num'] = info['vol_num_alt2']
            return {'year': info.get('year', None),
                    'series': series,
                    'volume': info['vol_num'],
                    'page': info['page'] or info['jinst_page'],
                    'page_end': info['page_end'],
                    'len': len(info['aftertitle'])}
    return None
Look for other numeration in line .
285
8
235,623
def identify_ibids(line):
    """Find IBIDs in *line*, recording their positions and masking them.

    Returns ``(positions, line)`` where *positions* maps each match's start
    offset to its matched text, and *line* has every match replaced by
    underscores of the same length.
    """
    positions = {}
    # finditer iterates over the original string object, so rebinding
    # `line` inside the loop does not disturb the match offsets.
    for match in re_ibid.finditer(line):
        matched = match.group(0)
        positions[match.start()] = matched
        # Mask the match (same length) so later passes skip it.
        line = line[:match.start()] + "_" * len(matched) + line[match.end():]
    return positions, line
Find IBIDs within the line, record their position and length, and replace them with underscores.
132
17
235,624
def find_numeration(line):
    """Attempt to locate citation numeration (volume/page/year) in a reference line.

    Tries a fixed sequence of numeration patterns and returns a dict with
    keys year/series/volume/page/page_end/len for the first that matches,
    or None when nothing matches.
    """
    patterns = (
        # vol,page,year
        re_numeration_vol_page_yr,
        re_numeration_vol_nucphys_page_yr,
        re_numeration_nucphys_vol_page_yr,
        # With sub volume
        re_numeration_vol_subvol_nucphys_yr_page,
        re_numeration_vol_nucphys_yr_subvol_page,
        # vol,year,page
        re_numeration_vol_yr_page,
        re_numeration_nucphys_vol_yr_page,
        re_numeration_vol_nucphys_series_yr_page,
        # vol,page,year
        re_numeration_vol_series_nucphys_page_yr,
        re_numeration_vol_nucphys_series_page_yr,
        # year,vol,page
        re_numeration_yr_vol_page,
    )
    for pattern in patterns:
        match = pattern.match(line)
        if match:
            info = match.groupdict()
            # Prefer an explicit 'series' group; otherwise derive it from the volume.
            series = info.get('series', None)
            if not series:
                series = extract_series_from_volume(info['vol'])
            # Fall back through the alternative volume-number groups.
            if not info['vol_num']:
                info['vol_num'] = info['vol_num_alt']
            if not info['vol_num']:
                info['vol_num'] = info['vol_num_alt2']
            return {'year': info.get('year', None),
                    'series': series,
                    'volume': info['vol_num'],
                    'page': info['page'] or info['jinst_page'],
                    'page_end': info['page_end'],
                    'len': match.end()}
    return None
Given a reference line attempt to locate instances of citation numeration in the line .
420
16
235,625
def remove_reference_line_marker(line):
    """Trim a reference line's marker (e.g. ``[1]``) from the start of the line.

    Returns ``(marker_value, remaining_line)``; the marker value is a
    single space when no marker is found.
    """
    # Get patterns to identify reference-line marker patterns:
    marker_patterns = get_reference_line_numeration_marker_patterns()
    line = line.lstrip()
    marker_match = regex_match_list(line, marker_patterns)
    if marker_match is None:
        return u" ", line
    # Found a marker: trim it from the start of the line.
    return marker_match.group(u'mark'), line[marker_match.end():].lstrip()
Trim a reference line's marker from the beginning of the line.
145
14
235,626
def roman2arabic(num):
    """Convert a roman numeral string (uppercase, e.g. 'MCMXCIV') to an integer.

    Improvement: the original encoded the digit values in an opaque
    arithmetic hash (``10 ** (205558 % ord(r) % 7) % 9995``); an explicit
    mapping is equivalent for valid numerals and far more readable.
    """
    values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    total = 0
    prev = 0
    for char in num:
        n = values[char]
        total += n
        # Subtractive notation: a smaller digit before a larger one (IV, IX,
        # XL, ...) must be subtracted twice, since it was already added once.
        if prev < n:
            total -= 2 * prev
        prev = n
    return total
Convert numbers from roman to arabic
55
10
235,627
def format_report_number(citation_elements):
    """Insert a missing dash between the name and number parts of report numbers."""
    pattern = re.compile(r'^(?P<name>[A-Z-]+)(?P<nums>[\d-]+)$', re.UNICODE)
    for element in citation_elements:
        if element['type'] != 'REPORTNUMBER':
            continue
        match = pattern.match(element['report_num'])
        if match:
            # Only rewrite when the name part does not already end with a dash.
            if not match.group('name').endswith('-'):
                element['report_num'] = match.group('name') + '-' + match.group('nums')
    return citation_elements
Format report numbers that are missing a dash
156
8
235,628
def format_hep(citation_elements):
    """Rewrite hep-th style report numbers to use a slash (e.g. hep-th/9901001)."""
    prefixes = ('astro-ph-', 'hep-th-', 'hep-ph-',
                'hep-ex-', 'hep-lat-', 'math-ph-')
    for element in citation_elements:
        if element['type'] != 'REPORTNUMBER':
            continue
        for prefix in prefixes:
            value = element['report_num']
            if value.startswith(prefix):
                cut = len(prefix)
                # Replace the prefix's trailing dash with a slash.
                element['report_num'] = value[:cut - 1] + '/' + value[cut:]
    return citation_elements
Format hep-th style report numbers, replacing the prefix's trailing dash with a slash.
153
9
235,629
def look_for_books(citation_elements, kbs):
    """Replace the first quoted title with a BOOK element when it is in the books kb."""
    quoted = next((el for el in citation_elements if el['type'] == 'QUOTED'), None)
    if quoted:
        normalized_title = quoted['title'].upper()
        if normalized_title in kbs['books']:
            entry = kbs['books'][normalized_title]
            citation_elements.append({'type': 'BOOK',
                                      'misc_txt': '',
                                      'authors': entry[0],
                                      'title': entry[1],
                                      'year': entry[2].strip(';')})
            citation_elements.remove(quoted)
    return citation_elements
Look for books in our kb
165
6
235,630
def split_volume_from_journal(citation_elements):
    """Move a series letter appended to the journal title (after ';') into the volume."""
    for element in citation_elements:
        if element['type'] == 'JOURNAL' and ';' in element['title']:
            title, series = element['title'].rsplit(';', 1)
            element['title'] = title
            element['volume'] = series + element['volume']
    return citation_elements
Split volume from journal title
94
5
235,631
def remove_b_for_nucl_phys(citation_elements):
    """Strip a leading 'B'/'b' from the volume of Nucl.Phys.Proc.Suppl. references."""
    target = 'Nucl.Phys.Proc.Suppl.'
    for element in citation_elements:
        if element['type'] != 'JOURNAL' or element['title'] != target:
            continue
        if 'volume' in element and element['volume'][:1] in ('b', 'B'):
            element['volume'] = element['volume'][1:]
    return citation_elements
Removes b from the volume of some journals
123
9
235,632
def mangle_volume(citation_elements):
    """Move a trailing volume letter in front of the number (e.g. '51B' -> 'B51')."""
    volume_re = re.compile(r"(\d+)([A-Z])", re.U | re.I)
    for element in citation_elements:
        if element['type'] != 'JOURNAL':
            continue
        match = volume_re.match(element['volume'])
        if match:
            element['volume'] = match.group(2) + match.group(1)
    return citation_elements
Make sure the volume letter is before the volume number
107
10
235,633
def split_citations(citation_elements):
    """Split a flat list of citation elements into multiple citations.

    A new citation is started when the linked record id changes, when the
    DOI changes, or when a ';' separator appears in an element's misc
    text.  Ibid references inherit the author of the preceding citation.
    Returns a list of element lists, with empty citations filtered out.
    """
    splitted_citations = []
    new_elements = []
    current_recid = None
    current_doi = None

    def check_ibid(current_elements, trigger_el):
        # NOTE(review): this scans the closed-over `new_elements`, not the
        # `current_elements` parameter — presumably intentional; confirm.
        for el in new_elements:
            if el['type'] == 'AUTH':
                return
        # Check for ibid
        if trigger_el.get('is_ibid', False):
            # Search backwards (current citation first, then the previous
            # one) for an author element to copy into this citation.
            if splitted_citations:
                els = chain(reversed(current_elements),
                            reversed(splitted_citations[-1]))
            else:
                els = reversed(current_elements)
            for el in els:
                if el['type'] == 'AUTH':
                    new_elements.append(el.copy())
                    break

    def start_new_citation():
        """Start new citation"""
        splitted_citations.append(new_elements[:])
        del new_elements[:]

    for el in citation_elements:
        try:
            el_recid = el['recid']
        except KeyError:
            el_recid = None

        if current_recid and el_recid and current_recid == el_recid:
            # Do not start a new citation
            pass
        elif current_recid and el_recid and current_recid != el_recid \
                or current_doi and el['type'] == 'DOI' and \
                current_doi != el['doi_string']:
            start_new_citation()
            # Some authors may be found in the previous citation
            balance_authors(splitted_citations, new_elements)
        elif ';' in el['misc_txt']:
            misc_txt, el['misc_txt'] = el['misc_txt'].split(';', 1)
            if misc_txt:
                new_elements.append({'type': 'MISC',
                                     'misc_txt': misc_txt})
            start_new_citation()
            # In case el['recid'] is None, we want to reset it
            # because we are starting a new reference
            current_recid = el_recid
            while ';' in el['misc_txt']:
                misc_txt, el['misc_txt'] = el['misc_txt'].split(';', 1)
                if misc_txt:
                    new_elements.append({'type': 'MISC',
                                         'misc_txt': misc_txt})
                start_new_citation()
                current_recid = None

        if el_recid:
            current_recid = el_recid

        if el['type'] == 'DOI':
            current_doi = el['doi_string']

        check_ibid(new_elements, el)
        new_elements.append(el)

    splitted_citations.append(new_elements)

    return [el for el in splitted_citations if not empty_citation(el)]
Split a citation line in multiple citations
665
7
235,634
def look_for_hdl(citation_elements):
    """Extract handle (HDL) identifiers from the misc text of citation elements.

    Each match becomes a new HDL element inserted right after its source
    element; the matched text (and everything after it) moves out of the
    source element's misc text.
    """
    for element in list(citation_elements):
        matches = list(re_hdl.finditer(element['misc_txt']))
        # Walk right-to-left so earlier match offsets stay valid.
        for match in reversed(matches):
            hdl_element = {'type': 'HDL',
                           'hdl_id': match.group('hdl_id'),
                           'misc_txt': element['misc_txt'][match.end():]}
            element['misc_txt'] = element['misc_txt'][:match.start()]
            citation_elements.insert(citation_elements.index(element) + 1,
                                     hdl_element)
Looks for handle identifiers in the misc txt of the citation elements
168
13
235,635
def look_for_hdl_urls(citation_elements):
    """Convert URL elements that are really handle identifiers into HDL elements."""
    for element in citation_elements:
        if element['type'] != 'URL':
            continue
        match = re_hdl.match(element['url_string'])
        if match:
            # Re-type the element in place and drop the URL-specific keys.
            element['type'] = 'HDL'
            element['hdl_id'] = match.group('hdl_id')
            del element['url_desc']
            del element['url_string']
Looks for handle identifiers that have already been identified as urls
108
12
235,636
def parse_reference_line(ref_line, kbs, bad_titles_count={}, linker_callback=None):
    """Parse one reference line.

    Runs the full tagging/normalisation pipeline on *ref_line* and returns
    ``(splitted_citations, line_marker, counts, bad_titles_count)``.
    NOTE(review): the mutable default for ``bad_titles_count`` is shared
    across calls — presumably intentional (accumulating stats); confirm.
    """
    # Strip the 'marker' (e.g. [1]) from this reference line:
    line_marker, ref_line = remove_reference_line_marker(ref_line)
    # Find DOI sections in citation
    ref_line, identified_dois = identify_and_tag_DOI(ref_line)
    # Identify and replace URLs in the line:
    ref_line, identified_urls = identify_and_tag_URLs(ref_line)
    # Tag <cds.JOURNAL>, etc.
    tagged_line, bad_titles_count = tag_reference_line(ref_line, kbs, bad_titles_count)
    # Debug print tagging (authors, titles, volumes, etc.)
    LOGGER.debug("tags %r", tagged_line)
    # Using the recorded information, create a MARC XML representation
    # of the rebuilt line:
    # At the same time, get stats of citations found in the reference line
    # (titles, urls, etc):
    citation_elements, line_marker, counts = parse_tagged_reference_line(line_marker, tagged_line, identified_dois, identified_urls)
    # Transformations on elements
    split_volume_from_journal(citation_elements)
    format_volume(citation_elements)
    handle_special_journals(citation_elements, kbs)
    format_report_number(citation_elements)
    format_author_ed(citation_elements)
    look_for_books(citation_elements, kbs)
    format_hep(citation_elements)
    remove_b_for_nucl_phys(citation_elements)
    mangle_volume(citation_elements)
    arxiv_urls_to_report_numbers(citation_elements)
    look_for_hdl(citation_elements)
    look_for_hdl_urls(citation_elements)
    # Link references if desired
    if linker_callback:
        associate_recids(citation_elements, linker_callback)
    # Split the reference in multiple ones if needed
    splitted_citations = split_citations(citation_elements)
    # Look for implied ibids
    look_for_implied_ibids(splitted_citations)
    # Find year
    add_year_elements(splitted_citations)
    # Look for books in misc field
    look_for_undetected_books(splitted_citations, kbs)
    if linker_callback:
        # Link references with the newly added ibids/books information
        for citations in splitted_citations:
            associate_recids(citations, linker_callback)
    # FIXME: Needed?
    # Remove references with only misc text
    # splitted_citations = remove_invalid_references(splitted_citations)
    # Merge references with only misc text
    # splitted_citations = merge_invalid_references(splitted_citations)
    remove_duplicated_authors(splitted_citations)
    remove_duplicated_dois(splitted_citations)
    remove_duplicated_collaborations(splitted_citations)
    add_recid_elements(splitted_citations)
    # For debugging purposes
    print_citations(splitted_citations, line_marker)
    return splitted_citations, line_marker, counts, bad_titles_count
Parse one reference line
765
5
235,637
def search_for_book_in_misc(citation, kbs):
    """Search the misc_txt fields of *citation* for a known book title.

    When a title from the books knowledge base is found (corroborated by a
    matching year or author name), a BOOK element is appended, the matched
    text and year are cut out of the misc text, and True is returned;
    otherwise False.
    """
    citation_year = year_from_citation(citation)
    for citation_element in citation:
        LOGGER.debug(u"Searching for book title in: %s", citation_element['misc_txt'])
        for title in kbs['books']:
            startIndex = find_substring_ignore_special_chars(citation_element['misc_txt'], title)
            if startIndex != -1:
                line = kbs['books'][title.upper()]
                book_year = line[2].strip(';')
                book_authors = line[0]
                book_found = False
                if citation_year == book_year:
                    # For now consider the citation as valid, we are using
                    # an exact search, we don't need to check the authors
                    # However, the code below will be useful if we decide
                    # to introduce fuzzy matching.
                    book_found = True
                # Corroborate via author names from the citation...
                for author in get_possible_author_names(citation):
                    if find_substring_ignore_special_chars(book_authors, author) != -1:
                        book_found = True
                # ...or via author words from the kb appearing in the misc text.
                for author in re.findall('[a-zA-Z]{4,}', book_authors):
                    if find_substring_ignore_special_chars(citation_element['misc_txt'], author) != -1:
                        book_found = True
                if book_found:
                    LOGGER.debug(u"Book found: %s", title)
                    book_element = {'type': 'BOOK',
                                    'misc_txt': '',
                                    'authors': book_authors,
                                    'title': line[1],
                                    'year': book_year}
                    citation.append(book_element)
                    citation_element['misc_txt'] = cut_substring_with_special_chars(citation_element['misc_txt'], title, startIndex)
                    # Remove year from misc txt
                    citation_element['misc_txt'] = remove_year(citation_element['misc_txt'], book_year)
                    return True
    LOGGER.debug("Book not found!")
    return False
Searches for books in the misc_txt field if the citation is not recognized as anything else (journal, book, etc.).
484
25
235,638
def map_tag_to_subfield(tag_type, line, cur_misc_txt, dest):
    """Create a new citation element from a <cds.TAG>...</cds.TAG> span.

    *line* is expected to start just after the opening tag.  Returns a
    ``(element_or_None, remaining_line, remaining_misc_txt)`` triple.
    """
    opening_tag = '<cds.%s>' % tag_type
    closing_tag = '</cds.%s>' % tag_type
    # extract the institutional report-number from the line:
    idx_closing_tag = line.find(closing_tag)
    if idx_closing_tag == -1:
        # No closing tag found: the recognised span is unreliable, so skip
        # past (the length of) the opening tag and report nothing.
        return None, line[len(opening_tag):], cur_misc_txt
    element = {'type': tag_type,
               'misc_txt': cur_misc_txt,
               dest: line[:idx_closing_tag]}
    remaining = line[idx_closing_tag + len(closing_tag):]
    return element, remaining, u""
Create a new reference element
257
5
235,639
def remove_leading_garbage_lines_from_reference_section(ref_sectn):
    """Drop leading blank lines and e-mail address lines from the references.

    Extraction sometimes leaves such junk in front of the real reference
    lines; *ref_sectn* is mutated in place and returned.
    """
    p_email = re.compile(r'^\s*e\-?mail', re.UNICODE)
    while ref_sectn:
        first = ref_sectn[0]
        if not (first.isspace() or p_email.match(first)):
            break
        ref_sectn.pop(0)
    return ref_sectn
Sometimes the first lines of the extracted references are completely blank or email addresses . These must be removed as they are not references .
98
25
235,640
def get_plaintext_document_body(fpath, keep_layout=False):
    """Return the full text of the document at *fpath* as a list of unicode lines.

    Plain-text files are read directly; PDFs are converted via pdftotext.
    Raises UnknownDocumentTypeError for any other detected mime type.
    """
    textbody = []
    mime_type = magic.from_file(fpath, mime=True)
    if mime_type == "text/plain":
        # NOTE(review): line.decode() implies Python 2 byte-str lines; under
        # Python 3 this would need open(..., "rb") — confirm target version.
        with open(fpath, "r") as f:
            textbody = [line.decode("utf-8") for line in f.readlines()]
    elif mime_type == "application/pdf":
        textbody = convert_PDF_to_plaintext(fpath, keep_layout)
    else:
        raise UnknownDocumentTypeError(mime_type)
    return textbody
Given a file - path to a full - text return a list of unicode strings whereby each string is a line of the fulltext . In the case of a plain - text document this simply means reading the contents in from the file . In the case of a PDF however this means converting the document to plaintext . It raises UnknownDocumentTypeError if the document is not a PDF or plain text .
141
81
235,641
def parse_references(reference_lines, recid=None, override_kbs_files=None, reference_format=u"{title} {volume} ({year}) {page}", linker_callback=None):
    """Parse a list of references.

    Returns ``(built_references, stats)``.
    """
    # RefExtract knowledge bases
    kbs = get_kbs(custom_kbs_files=override_kbs_files)
    # Identify journal titles, report numbers, URLs, DOIs, and authors...
    processed, counts, _bad_titles_count = parse_references_elements(
        reference_lines, kbs, linker_callback)
    return build_references(processed, reference_format), build_stats(counts)
Parse a list of references
156
6
235,642
def build_stats(counts):
    """Return stats information derived from the *counts* structure."""
    stats = {
        'status': 0,
        'reportnum': counts['reportnum'],
        'title': counts['title'],
        'author': counts['auth_group'],
        'url': counts['url'],
        'doi': counts['doi'],
        'misc': counts['misc'],
    }
    # Legacy dash-joined summary string, kept for backwards compatibility.
    stats["old_stats_str"] = (
        "%(status)s-%(reportnum)s-%(title)s-%(author)s-%(url)s-%(doi)s-%(misc)s" % stats)
    stats["date"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    stats["version"] = version
    return stats
Return stats information from counts structure .
196
7
235,643
def replace_undesirable_characters(line):
    """Replace certain undesirable strings and characters in a text line."""
    # These are separate because we want a particular order
    for bad, good in UNDESIRABLE_STRING_REPLACEMENTS:
        line = line.replace(bad, good)
    for bad, good in iteritems(UNDESIRABLE_CHAR_REPLACEMENTS):
        line = line.replace(bad, good)
    return line
Replace certain bad characters in a text line .
96
10
235,644
def convert_PDF_to_plaintext(fpath, keep_layout=False):
    """Convert the PDF at *fpath* to a list of unicode text lines via pdftotext.

    Raises IOError when the pdftotext executable is missing.
    """
    if not os.path.isfile(CFG_PATH_PDFTOTEXT):
        raise IOError('Missing pdftotext executable')
    if keep_layout:
        layout_option = "-layout"
    else:
        layout_option = "-raw"
    doclines = []
    # Pattern to check for lines with a leading page-break character.
    # If this pattern is matched, we want to split the page-break into
    # its own line because we rely upon this for trying to strip headers
    # and footers, and for some other pattern matching.
    p_break_in_line = re.compile(ur'^\s*\f(.+)$', re.UNICODE)
    # build pdftotext command:
    cmd_pdftotext = [CFG_PATH_PDFTOTEXT, layout_option, "-q", "-enc", "UTF-8", fpath, "-"]
    LOGGER.debug(u"%s", ' '.join(cmd_pdftotext))
    # open pipe to pdftotext:
    # NOTE(review): the child process is never wait()ed on / reaped —
    # confirm whether that matters to callers.
    pipe_pdftotext = subprocess.Popen(cmd_pdftotext, stdout=subprocess.PIPE)
    # read back results:
    for docline in pipe_pdftotext.stdout:
        unicodeline = docline.decode("utf-8")
        # Check for a page-break in this line:
        m_break_in_line = p_break_in_line.match(unicodeline)
        if m_break_in_line is None:
            # There was no page-break in this line. Just add the line:
            doclines.append(unicodeline)
        else:
            # If there was a page-break character in the same line as some
            # text, split it out into its own line so that we can later
            # try to find headers and footers:
            doclines.append(u"\f")
            doclines.append(m_break_in_line.group(1))
    LOGGER.debug(u"convert_PDF_to_plaintext found: %s lines of text", len(doclines))
    return doclines
Convert PDF to txt using pdftotext
490
12
235,645
def get_author_affiliation_numeration_str ( punct = None ) : # FIXME cater for start or end numeration (ie two puncs) # Number to look for, either general or specific re_number = r'(?:\d\d?)' re_chained_numbers = r"(?:(?:[,;]\s*%s\.?\s*))*" % re_number # Punctuation surrounding the number, either general or specific again if punct is None : re_punct = r"(?:[\{\(\[]?)" else : re_punct = re . escape ( punct ) # Generic number finder (MUST NOT INCLUDE NAMED GROUPS!!!) numeration_str = r""" (?:\s*(%(punct)s)\s* ## Left numeration punctuation (%(num)s\s* ## Core numeration item, either specific or generic %(num_chain)s ## Extra numeration, either generic or empty ) (?:(%(punct)s)) ## Right numeration punctuation )""" % { 'num' : re_number , 'num_chain' : re_chained_numbers , 'punct' : re_punct } return numeration_str
The numeration which can be applied to author names . Numeration is sometimes found next to authors of papers .
280
23
235,646
def find_reference_section ( docbody ) : ref_details = None title_patterns = get_reference_section_title_patterns ( ) # Try to find refs section title: for title_pattern in title_patterns : # Look for title pattern in docbody for reversed_index , line in enumerate ( reversed ( docbody ) ) : title_match = title_pattern . match ( line ) if title_match : title = title_match . group ( 'title' ) index = len ( docbody ) - 1 - reversed_index temp_ref_details , found_title = find_numeration ( docbody [ index : index + 6 ] , title ) if temp_ref_details : if ref_details and 'title' in ref_details and ref_details [ 'title' ] and not temp_ref_details [ 'title' ] : continue if ref_details and 'marker' in ref_details and ref_details [ 'marker' ] and not temp_ref_details [ 'marker' ] : continue ref_details = temp_ref_details ref_details [ 'start_line' ] = index ref_details [ 'title_string' ] = title if found_title : break if ref_details : break return ref_details
Search in document body for its reference section .
276
9
235,647
def find_numeration ( docbody , title ) : ref_details , found_title = find_numeration_in_title ( docbody , title ) if not ref_details : ref_details , found_title = find_numeration_in_body ( docbody ) return ref_details , found_title
Find numeration pattern
71
4
235,648
def get_reference_lines ( docbody , ref_sect_start_line , ref_sect_end_line , ref_sect_title , ref_line_marker_ptn , title_marker_same_line ) : start_idx = ref_sect_start_line if title_marker_same_line : # Title on same line as 1st ref- take title out! title_start = docbody [ start_idx ] . find ( ref_sect_title ) if title_start != - 1 : # Set the first line with no title docbody [ start_idx ] = docbody [ start_idx ] [ title_start + len ( ref_sect_title ) : ] elif ref_sect_title is not None : # Set the start of the reference section to be after the title line start_idx += 1 if ref_sect_end_line is not None : ref_lines = docbody [ start_idx : ref_sect_end_line + 1 ] else : ref_lines = docbody [ start_idx : ] if ref_sect_title : ref_lines = strip_footer ( ref_lines , ref_sect_title ) # Now rebuild reference lines: # (Go through each raw reference line, and format them into a set # of properly ordered lines based on markers) return rebuild_reference_lines ( ref_lines , ref_line_marker_ptn )
After the reference section of a document has been identified and the first and last lines of the reference section have been recorded this function is called to take the reference lines out of the document body . The document s reference lines are returned in a list of strings whereby each string is a reference line . Before this can be done however the reference section is passed to another function that rebuilds any broken reference lines .
314
80
235,649
def match_pagination ( ref_line ) : pattern = ur'\(?\[?(\d{1,4})\]?\)?\.?\s*$' re_footer = re . compile ( pattern , re . UNICODE ) match = re_footer . match ( ref_line ) if match : return int ( match . group ( 1 ) ) return None
Remove footer pagination from references lines
86
8
235,650
def strip_footer ( ref_lines , section_title ) : pattern = ur'\(?\[?\d{0,4}\]?\)?\.?\s*%s\s*$' % re . escape ( section_title ) re_footer = re . compile ( pattern , re . UNICODE ) return [ l for l in ref_lines if not re_footer . match ( l ) ]
Remove footer title from references lines
95
7
235,651
def extract_references_from_url ( url , headers = None , chunk_size = 1024 , * * kwargs ) : # Get temporary filepath to download to filename , filepath = mkstemp ( suffix = u"_{0}" . format ( os . path . basename ( url ) ) , ) os . close ( filename ) try : req = requests . get ( url = url , headers = headers , stream = True ) req . raise_for_status ( ) with open ( filepath , 'wb' ) as f : for chunk in req . iter_content ( chunk_size ) : f . write ( chunk ) references = extract_references_from_file ( filepath , * * kwargs ) except requests . exceptions . HTTPError : raise FullTextNotAvailableError ( u"URL not found: '{0}'" . format ( url ) ) , None , sys . exc_info ( ) [ 2 ] finally : os . remove ( filepath ) return references
Extract references from the pdf specified in the url .
212
11
235,652
def extract_references_from_file ( path , recid = None , reference_format = u"{title} {volume} ({year}) {page}" , linker_callback = None , override_kbs_files = None ) : if not os . path . isfile ( path ) : raise FullTextNotAvailableError ( u"File not found: '{0}'" . format ( path ) ) docbody = get_plaintext_document_body ( path ) reflines , dummy , dummy = extract_references_from_fulltext ( docbody ) if not reflines : docbody = get_plaintext_document_body ( path , keep_layout = True ) reflines , dummy , dummy = extract_references_from_fulltext ( docbody ) parsed_refs , stats = parse_references ( reflines , recid = recid , reference_format = reference_format , linker_callback = linker_callback , override_kbs_files = override_kbs_files , ) if magic . from_file ( path , mime = True ) == "application/pdf" : texkeys = extract_texkeys_from_pdf ( path ) if len ( texkeys ) == len ( parsed_refs ) : parsed_refs = [ dict ( ref , texkey = [ key ] ) for ref , key in izip ( parsed_refs , texkeys ) ] return parsed_refs
Extract references from a local pdf file .
312
9
235,653
def extract_references_from_string ( source , is_only_references = True , recid = None , reference_format = "{title} {volume} ({year}) {page}" , linker_callback = None , override_kbs_files = None ) : docbody = source . split ( '\n' ) if not is_only_references : reflines , dummy , dummy = extract_references_from_fulltext ( docbody ) else : refs_info = get_reference_section_beginning ( docbody ) if not refs_info : refs_info , dummy = find_numeration_in_body ( docbody ) refs_info [ 'start_line' ] = 0 refs_info [ 'end_line' ] = len ( docbody ) - 1 , reflines = rebuild_reference_lines ( docbody , refs_info [ 'marker_pattern' ] ) parsed_refs , stats = parse_references ( reflines , recid = recid , reference_format = reference_format , linker_callback = linker_callback , override_kbs_files = override_kbs_files , ) return parsed_refs
Extract references from a raw string .
264
8
235,654
def extract_journal_reference ( line , override_kbs_files = None ) : kbs = get_kbs ( custom_kbs_files = override_kbs_files ) references , dummy_m , dummy_c , dummy_co = parse_reference_line ( line , kbs ) for elements in references : for el in elements : if el [ 'type' ] == 'JOURNAL' : return el
Extract the journal reference from string .
93
8
235,655
def build_references ( citations , reference_format = False ) : # Now, run the method which will take as input: # 1. A list of lists of dictionaries, where each dictionary is a piece # of citation information corresponding to a tag in the citation. # 2. The line marker for this entire citation line (mulitple citation # 'finds' inside a single citation will use the same marker value) # The resulting xml line will be a properly marked up form of the # citation. It will take into account authors to try and split up # references which should be read as two SEPARATE ones. return [ c for citation_elements in citations for elements in citation_elements [ 'elements' ] for c in build_reference_fields ( elements , citation_elements [ 'line_marker' ] , citation_elements [ 'raw_ref' ] , reference_format ) ]
Build list of reference dictionaries from a references list
194
10
235,656
def build_reference_fields ( citation_elements , line_marker , raw_ref , reference_format ) : # Begin the datafield element current_field = create_reference_field ( line_marker ) current_field [ 'raw_ref' ] = [ raw_ref ] reference_fields = [ current_field ] for element in citation_elements : # Before going onto checking 'what' the next element is, # handle misc text and semi-colons # Multiple misc text subfields will be compressed later # This will also be the only part of the code that deals with MISC # tag_typed elements misc_txt = element [ 'misc_txt' ] if misc_txt . strip ( "., [](){}" ) : misc_txt = misc_txt . lstrip ( '])} ,.' ) . rstrip ( '[({ ,.' ) add_subfield ( current_field , 'misc' , misc_txt ) # Now handle the type dependent actions # JOURNAL if element [ 'type' ] == "JOURNAL" : add_journal_subfield ( current_field , element , reference_format ) # REPORT NUMBER elif element [ 'type' ] == "REPORTNUMBER" : add_subfield ( current_field , 'reportnumber' , element [ 'report_num' ] ) # URL elif element [ 'type' ] == "URL" : if element [ 'url_string' ] == element [ 'url_desc' ] : # Build the datafield for the URL segment of the reference # line: add_subfield ( current_field , 'url' , element [ 'url_string' ] ) # Else, in the case that the url string and the description differ # in some way, include them both else : add_subfield ( current_field , 'url' , element [ 'url_string' ] ) add_subfield ( current_field , 'urldesc' , element [ 'url_desc' ] ) # DOI elif element [ 'type' ] == "DOI" : add_subfield ( current_field , 'doi' , 'doi:' + element [ 'doi_string' ] ) # HDL elif element [ 'type' ] == "HDL" : add_subfield ( current_field , 'hdl' , 'hdl:' + element [ 'hdl_id' ] ) # AUTHOR elif element [ 'type' ] == "AUTH" : value = element [ 'auth_txt' ] if element [ 'auth_type' ] == 'incl' : value = "(%s)" % value add_subfield ( current_field , 'author' , value ) elif element [ 'type' ] == "QUOTED" : add_subfield ( current_field , 'title' , element [ 'title' ] ) elif 
element [ 'type' ] == "ISBN" : add_subfield ( current_field , 'isbn' , element [ 'ISBN' ] ) elif element [ 'type' ] == "BOOK" : add_subfield ( current_field , 'title' , element [ 'title' ] ) elif element [ 'type' ] == "PUBLISHER" : add_subfield ( current_field , 'publisher' , element [ 'publisher' ] ) elif element [ 'type' ] == "YEAR" : add_subfield ( current_field , 'year' , element [ 'year' ] ) elif element [ 'type' ] == "COLLABORATION" : add_subfield ( current_field , 'collaboration' , element [ 'collaboration' ] ) elif element [ 'type' ] == "RECID" : add_subfield ( current_field , 'recid' , str ( element [ 'recid' ] ) ) return reference_fields
Create the final representation of the reference information .
842
9
235,657
def extract_texkeys_from_pdf ( pdf_file ) : with open ( pdf_file , 'rb' ) as pdf_stream : try : pdf = PdfFileReader ( pdf_stream , strict = False ) destinations = pdf . getNamedDestinations ( ) except Exception : LOGGER . debug ( u"PDF: Internal PyPDF2 error, no TeXkeys returned." ) return [ ] # not all named destinations point to references refs = [ dest for dest in destinations . iteritems ( ) if re_reference_in_dest . match ( dest [ 0 ] ) ] try : if _destinations_in_two_columns ( pdf , refs ) : LOGGER . debug ( u"PDF: Using two-column layout" ) def sortfunc ( dest_couple ) : return _destination_position ( pdf , dest_couple [ 1 ] ) else : LOGGER . debug ( u"PDF: Using single-column layout" ) def sortfunc ( dest_couple ) : ( page , _ , ypos , xpos ) = _destination_position ( pdf , dest_couple [ 1 ] ) return ( page , ypos , xpos ) refs . sort ( key = sortfunc ) # extract the TeXkey from the named destination name return [ re_reference_in_dest . match ( destname ) . group ( 1 ) for ( destname , _ ) in refs ] except Exception : LOGGER . debug ( u"PDF: Impossible to determine layout, no TeXkeys returned" ) return [ ]
Extract the texkeys from the given PDF file
334
10
235,658
def get_reference_line_numeration_marker_patterns ( prefix = u'' ) : title = u"" if type ( prefix ) in ( str , unicode ) : title = prefix g_name = u'(?P<mark>' g_close = u')' space = ur'\s*' patterns = [ # [1] space + title + g_name + ur'\[\s*(?P<marknum>\d+)\s*\]' + g_close , # [<letters and numbers] space + title + g_name + ur'\[\s*[a-zA-Z:-]+\+?\s?(\d{1,4}[A-Za-z:-]?)?\s*\]' + g_close , # {1} space + title + g_name + ur'\{\s*(?P<marknum>\d+)\s*\}' + g_close , # (1) space + title + g_name + ur'\<\s*(?P<marknum>\d+)\s*\>' + g_close , space + title + g_name + ur'\(\s*(?P<marknum>\d+)\s*\)' + g_close , space + title + g_name + ur'(?P<marknum>\d+)\s*\.(?!\d)' + g_close , space + title + g_name + ur'(?P<marknum>\d+)\s+' + g_close , space + title + g_name + ur'(?P<marknum>\d+)\s*\]' + g_close , # 1] space + title + g_name + ur'(?P<marknum>\d+)\s*\}' + g_close , # 1} space + title + g_name + ur'(?P<marknum>\d+)\s*\)' + g_close , # 1) space + title + g_name + ur'(?P<marknum>\d+)\s*\>' + g_close , # [1.1] space + title + g_name + ur'\[\s*\d+\.\d+\s*\]' + g_close , # [ ] space + title + g_name + ur'\[\s*\]' + g_close , # * space + title + g_name + ur'\*' + g_close , ] return [ re . compile ( p , re . I | re . UNICODE ) for p in patterns ]
Return a list of compiled regex patterns used to search for the marker of a reference line in a full - text document .
591
24
235,659
def get_post_reference_section_keyword_patterns ( ) : compiled_patterns = [ ] patterns = [ u'(' + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'prepared' ) + ur'|' + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'created' ) + ur').*(AAS\s*)?\sLATEX' , ur'AAS\s+?LATEX\s+?' + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'macros' ) + u'v' , ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'This paper has been produced using' ) , ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters ( u'This article was processed by the author using Springer-Verlag' ) + u' LATEX' ] for p in patterns : compiled_patterns . append ( re . compile ( p , re . I | re . UNICODE ) ) return compiled_patterns
Return a list of compiled regex patterns used to search for various keywords that can often be found after and therefore suggest the end of a reference section in a full - text document .
289
35
235,660
def regex_match_list ( line , patterns ) : m = None for ptn in patterns : m = ptn . match ( line ) if m is not None : break return m
Given a list of COMPILED regex patters perform the re . match operation on the line for every pattern . Break from searching at the first match returning the match object . In the case that no patterns match the None type will be returned .
39
49
235,661
def get_url_repair_patterns ( ) : file_types_list = [ ur'h\s*t\s*m' , # htm ur'h\s*t\s*m\s*l' , # html ur't\s*x\s*t' # txt ur'p\s*h\s*p' # php ur'a\s*s\s*p\s*' # asp ur'j\s*s\s*p' , # jsp ur'p\s*y' , # py (python) ur'p\s*l' , # pl (perl) ur'x\s*m\s*l' , # xml ur'j\s*p\s*g' , # jpg ur'g\s*i\s*f' # gif ur'm\s*o\s*v' # mov ur's\s*w\s*f' # swf ur'p\s*d\s*f' # pdf ur'p\s*s' # ps ur'd\s*o\s*c' , # doc ur't\s*e\s*x' , # tex ur's\s*h\s*t\s*m\s*l' , # shtml ] pattern_list = [ ur'(h\s*t\s*t\s*p\s*\:\s*\/\s*\/)' , ur'(f\s*t\s*p\s*\:\s*\/\s*\/\s*)' , ur'((http|ftp):\/\/\s*[\w\d])' , ur'((http|ftp):\/\/([\w\d\s\._\-])+?\s*\/)' , ur'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\s\.\-])+?\/)+)' , ur'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\s\.\-])+?\/)*([\w\d\_\s\-]+\.\s?[\w\d]+))' , ] pattern_list = [ re . compile ( p , re . I | re . UNICODE ) for p in pattern_list ] # some possible endings for URLs: p = ur'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\.\-])+?\/)*([\w\d\_\-]+\.%s))' for extension in file_types_list : p_url = re . compile ( p % extension , re . I | re . UNICODE ) pattern_list . append ( p_url ) # if url last thing in line, and only 10 letters max, concat them p_url = re . compile ( r'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\.\-])+?\/)*\s*?([\w\d\_\.\-]\s?){1,10}\s*)$' , re . I | re . UNICODE ) pattern_list . append ( p_url ) return pattern_list
Initialise and return a list of precompiled regexp patterns that are used to try to re - assemble URLs that have been broken during a document s conversion to plain - text .
749
37
235,662
def join_lines ( line1 , line2 ) : if line1 == u"" : pass elif line1 [ - 1 ] == u'-' : # hyphenated word at the end of the # line - don't add in a space and remove hyphen line1 = line1 [ : - 1 ] elif line1 [ - 1 ] != u' ' : # no space at the end of this # line, add in a space line1 = line1 + u' ' return line1 + line2
Join 2 lines of text
109
5
235,663
def repair_broken_urls ( line ) : def _chop_spaces_in_url_match ( m ) : """Suppresses spaces in a matched URL.""" return m . group ( 1 ) . replace ( " " , "" ) for ptn in re_list_url_repair_patterns : line = ptn . sub ( _chop_spaces_in_url_match , line ) return line
Attempt to repair broken URLs in a line of text .
92
11
235,664
def remove_and_record_multiple_spaces_in_line ( line ) : removed_spaces = { } # get a collection of match objects for all instances of # multiple-spaces found in the line: multispace_matches = re_group_captured_multiple_space . finditer ( line ) # record the number of spaces found at each match position: for multispace in multispace_matches : removed_spaces [ multispace . start ( ) ] = ( multispace . end ( ) - multispace . start ( ) - 1 ) # now remove the multiple-spaces from the line, replacing with a # single space at each position: line = re_group_captured_multiple_space . sub ( u' ' , line ) return ( removed_spaces , line )
For a given string locate all ocurrences of multiple spaces together in the line record the number of spaces found at each position and replace them with a single space .
179
33
235,665
def remove_page_boundary_lines ( docbody ) : number_head_lines = number_foot_lines = 0 # Make sure document not just full of whitespace: if not document_contains_text ( docbody ) : # document contains only whitespace - cannot safely # strip headers/footers return docbody # Get list of index posns of pagebreaks in document: page_break_posns = get_page_break_positions ( docbody ) # Get num lines making up each header if poss: number_head_lines = get_number_header_lines ( docbody , page_break_posns ) # Get num lines making up each footer if poss: number_foot_lines = get_number_footer_lines ( docbody , page_break_posns ) # Remove pagebreaks,headers,footers: docbody = strip_headers_footers_pagebreaks ( docbody , page_break_posns , number_head_lines , number_foot_lines ) return docbody
Try to locate page breaks headers and footers within a document body and remove the array cells at which they are found .
221
24
235,666
def get_page_break_positions ( docbody ) : page_break_posns = [ ] p_break = re . compile ( ur'^\s*\f\s*$' , re . UNICODE ) num_document_lines = len ( docbody ) for i in xrange ( num_document_lines ) : if p_break . match ( docbody [ i ] ) is not None : page_break_posns . append ( i ) return page_break_posns
Locate page breaks in the list of document lines and create a list positions in the document body list .
110
21
235,667
def get_number_header_lines ( docbody , page_break_posns ) : remaining_breaks = len ( page_break_posns ) - 1 num_header_lines = empty_line = 0 # pattern to search for a word in a line: p_wordSearch = re . compile ( ur'([A-Za-z0-9-]+)' , re . UNICODE ) if remaining_breaks > 2 : if remaining_breaks > 3 : # Only check odd page headers next_head = 2 else : # Check headers on each page next_head = 1 keep_checking = 1 while keep_checking : cur_break = 1 if docbody [ ( page_break_posns [ cur_break ] + num_header_lines + 1 ) ] . isspace ( ) : # this is a blank line empty_line = 1 if ( page_break_posns [ cur_break ] + num_header_lines + 1 ) == ( page_break_posns [ ( cur_break + 1 ) ] ) : # Have reached next page-break: document has no # body - only head/footers! keep_checking = 0 grps_headLineWords = p_wordSearch . findall ( docbody [ ( page_break_posns [ cur_break ] + num_header_lines + 1 ) ] ) cur_break = cur_break + next_head while ( cur_break < remaining_breaks ) and keep_checking : lineno = page_break_posns [ cur_break ] + num_header_lines + 1 if lineno >= len ( docbody ) : keep_checking = 0 break grps_thisLineWords = p_wordSearch . findall ( docbody [ lineno ] ) if empty_line : if len ( grps_thisLineWords ) != 0 : # This line should be empty, but isn't keep_checking = 0 else : if ( len ( grps_thisLineWords ) == 0 ) or ( len ( grps_headLineWords ) != len ( grps_thisLineWords ) ) : # Not same num 'words' as equivilent line # in 1st header: keep_checking = 0 else : keep_checking = check_boundary_lines_similar ( grps_headLineWords , grps_thisLineWords ) # Update cur_break for nxt line to check cur_break = cur_break + next_head if keep_checking : # Line is a header line: check next num_header_lines = num_header_lines + 1 empty_line = 0 return num_header_lines
Try to guess the number of header lines each page of a document has . The positions of the page breaks in the document are used to try to guess the number of header lines .
562
36
235,668
def get_number_footer_lines ( docbody , page_break_posns ) : num_breaks = len ( page_break_posns ) num_footer_lines = 0 empty_line = 0 keep_checking = 1 p_wordSearch = re . compile ( unicode ( r'([A-Za-z0-9-]+)' ) , re . UNICODE ) if num_breaks > 2 : while keep_checking : cur_break = 1 if page_break_posns [ cur_break ] - num_footer_lines - 1 < 0 or page_break_posns [ cur_break ] - num_footer_lines - 1 > len ( docbody ) - 1 : # Be sure that the docbody list boundary wasn't overstepped: break if docbody [ ( page_break_posns [ cur_break ] - num_footer_lines - 1 ) ] . isspace ( ) : empty_line = 1 grps_headLineWords = p_wordSearch . findall ( docbody [ ( page_break_posns [ cur_break ] - num_footer_lines - 1 ) ] ) cur_break = cur_break + 1 while ( cur_break < num_breaks ) and keep_checking : grps_thisLineWords = p_wordSearch . findall ( docbody [ ( page_break_posns [ cur_break ] - num_footer_lines - 1 ) ] ) if empty_line : if len ( grps_thisLineWords ) != 0 : # this line should be empty, but isn't keep_checking = 0 else : if ( len ( grps_thisLineWords ) == 0 ) or ( len ( grps_headLineWords ) != len ( grps_thisLineWords ) ) : # Not same num 'words' as equivilent line # in 1st footer: keep_checking = 0 else : keep_checking = check_boundary_lines_similar ( grps_headLineWords , grps_thisLineWords ) # Update cur_break for nxt line to check cur_break = cur_break + 1 if keep_checking : # Line is a footer line: check next num_footer_lines = num_footer_lines + 1 empty_line = 0 return num_footer_lines
Try to guess the number of footer lines each page of a document has . The positions of the page breaks in the document are used to try to guess the number of footer lines .
507
38
235,669
def strip_headers_footers_pagebreaks ( docbody , page_break_posns , num_head_lines , num_foot_lines ) : num_breaks = len ( page_break_posns ) page_lens = [ ] for x in xrange ( 0 , num_breaks ) : if x < num_breaks - 1 : page_lens . append ( page_break_posns [ x + 1 ] - page_break_posns [ x ] ) page_lens . sort ( ) if ( len ( page_lens ) > 0 ) and ( num_head_lines + num_foot_lines + 1 < page_lens [ 0 ] ) : # Safe to chop hdrs & ftrs page_break_posns . reverse ( ) first = 1 for i in xrange ( 0 , len ( page_break_posns ) ) : # Unless this is the last page break, chop headers if not first : for dummy in xrange ( 1 , num_head_lines + 1 ) : docbody [ page_break_posns [ i ] + 1 : page_break_posns [ i ] + 2 ] = [ ] else : first = 0 # Chop page break itself docbody [ page_break_posns [ i ] : page_break_posns [ i ] + 1 ] = [ ] # Chop footers (unless this is the first page break) if i != len ( page_break_posns ) - 1 : for dummy in xrange ( 1 , num_foot_lines + 1 ) : docbody [ page_break_posns [ i ] - num_foot_lines : page_break_posns [ i ] - num_foot_lines + 1 ] = [ ] return docbody
Remove page - break lines header lines and footer lines from the document .
381
15
235,670
def check_boundary_lines_similar ( l_1 , l_2 ) : num_matches = 0 if ( type ( l_1 ) != list ) or ( type ( l_2 ) != list ) or ( len ( l_1 ) != len ( l_2 ) ) : # these 'boundaries' are not similar return 0 num_elements = len ( l_1 ) for i in xrange ( 0 , num_elements ) : if l_1 [ i ] . isdigit ( ) and l_2 [ i ] . isdigit ( ) : # both lines are integers num_matches += 1 else : l1_str = l_1 [ i ] . lower ( ) l2_str = l_2 [ i ] . lower ( ) if ( l1_str [ 0 ] == l2_str [ 0 ] ) and ( l1_str [ len ( l1_str ) - 1 ] == l2_str [ len ( l2_str ) - 1 ] ) : num_matches = num_matches + 1 if ( len ( l_1 ) == 0 ) or ( float ( num_matches ) / float ( len ( l_1 ) ) < 0.9 ) : return 0 else : return 1
Compare two lists to see if their elements are roughly the same .
274
13
235,671
def make_cache_key ( custom_kbs_files = None ) : if custom_kbs_files : serialized_args = ( '%s=%s' % v for v in iteritems ( custom_kbs_files ) ) serialized_args = ';' . join ( serialized_args ) else : serialized_args = "default" cache_key = md5 ( serialized_args ) . digest ( ) return cache_key
Create cache key for kbs caches instances
100
8
235,672
def create_institute_numeration_group_regexp_pattern ( patterns ) : patterns_list = [ institute_num_pattern_to_regex ( p [ 1 ] ) for p in patterns ] grouped_numeration_pattern = u"(?P<numn>%s)" % u'|' . join ( patterns_list ) return grouped_numeration_pattern
Using a list of regexp patterns for recognising numeration patterns for institute preprint references ordered by length - longest to shortest - create a grouped OR or of these patterns ready to be used in a bigger regexp .
86
44
235,673
def _cmp_bystrlen_reverse ( a , b ) : if len ( a ) > len ( b ) : return - 1 elif len ( a ) < len ( b ) : return 1 else : return 0
A private cmp function to be used by the sort function of a list when ordering the titles found in a knowledge base by string - length - LONGEST - > SHORTEST .
47
37
235,674
def build_special_journals_kb ( fpath ) : journals = set ( ) with file_resolving ( fpath ) as fh : for line in fh : # Skip commented lines if line . startswith ( '#' ) : continue # Skip empty line if not line . strip ( ) : continue journals . add ( line . strip ( ) ) return journals
Load special journals database from file
80
6
235,675
def build_journals_re_kb ( fpath ) : def make_tuple ( match ) : regexp = match . group ( 'seek' ) repl = match . group ( 'repl' ) return regexp , repl kb = [ ] with file_resolving ( fpath ) as fh : for rawline in fh : if rawline . startswith ( '#' ) : continue # Extract the seek->replace terms from this KB line: m_kb_line = re_kb_line . search ( rawline ) kb . append ( make_tuple ( m_kb_line ) ) return kb
Load journals regexps knowledge base
134
6
235,676
def _parse_content_type ( content_type : Optional [ str ] ) -> Tuple [ Optional [ str ] , str ] : if not content_type : return None , "utf-8" else : type_ , parameters = cgi . parse_header ( content_type ) encoding = parameters . get ( "charset" , "utf-8" ) return type_ , encoding
Tease out the content - type and character encoding .
84
11
235,677
def _decode_body ( content_type : Optional [ str ] , body : bytes , * , strict : bool = False ) -> Any : type_ , encoding = _parse_content_type ( content_type ) if not len ( body ) or not content_type : return None decoded_body = body . decode ( encoding ) if type_ == "application/json" : return json . loads ( decoded_body ) elif type_ == "application/x-www-form-urlencoded" : return json . loads ( urllib . parse . parse_qs ( decoded_body ) [ "payload" ] [ 0 ] ) elif strict : raise ValueError ( f"unrecognized content type: {type_!r}" ) return decoded_body
Decode an HTTP body based on the specified content type .
169
12
235,678
def validate_event ( payload : bytes , * , signature : str , secret : str ) -> None : # https://developer.github.com/webhooks/securing/#validating-payloads-from-github signature_prefix = "sha1=" if not signature . startswith ( signature_prefix ) : raise ValidationFailure ( "signature does not start with " f"{repr(signature_prefix)}" ) hmac_ = hmac . new ( secret . encode ( "UTF-8" ) , msg = payload , digestmod = "sha1" ) calculated_sig = signature_prefix + hmac_ . hexdigest ( ) if not hmac . compare_digest ( signature , calculated_sig ) : raise ValidationFailure ( "payload's signature does not align " "with the secret" )
Validate the signature of a webhook event .
183
10
235,679
def accept_format ( * , version : str = "v3" , media : Optional [ str ] = None , json : bool = True ) -> str : # https://developer.github.com/v3/media/ # https://developer.github.com/v3/#current-version accept = f"application/vnd.github.{version}" if media is not None : accept += f".{media}" if json : accept += "+json" return accept
Construct the specification of the format that a request should return .
102
12
235,680
def create_headers ( requester : str , * , accept : str = accept_format ( ) , oauth_token : Optional [ str ] = None , jwt : Optional [ str ] = None ) -> Dict [ str , str ] : # user-agent: https://developer.github.com/v3/#user-agent-required # accept: https://developer.github.com/v3/#current-version # https://developer.github.com/v3/media/ # authorization: https://developer.github.com/v3/#authentication # authenticating as a GitHub App: https://developer.github.com/apps/building-github-apps/authenticating-with-github-apps/#authenticating-as-a-github-app if oauth_token is not None and jwt is not None : raise ValueError ( "Cannot pass both oauth_token and jwt." ) headers = { "user-agent" : requester , "accept" : accept } if oauth_token is not None : headers [ "authorization" ] = f"token {oauth_token}" elif jwt is not None : headers [ "authorization" ] = f"bearer {jwt}" return headers
Create a dict representing GitHub - specific header fields .
272
10
235,681
def decipher_response ( status_code : int , headers : Mapping [ str , str ] , body : bytes ) -> Tuple [ Any , Optional [ RateLimit ] , Optional [ str ] ] : data = _decode_body ( headers . get ( "content-type" ) , body ) if status_code in { 200 , 201 , 204 } : return data , RateLimit . from_http ( headers ) , _next_link ( headers . get ( "link" ) ) else : try : message = data [ "message" ] except ( TypeError , KeyError ) : message = None exc_type : Type [ HTTPException ] if status_code >= 500 : exc_type = GitHubBroken elif status_code >= 400 : exc_type = BadRequest if status_code == 403 : rate_limit = RateLimit . from_http ( headers ) if rate_limit and not rate_limit . remaining : raise RateLimitExceeded ( rate_limit , message ) elif status_code == 422 : errors = data . get ( "errors" , None ) if errors : fields = ", " . join ( repr ( e [ "field" ] ) for e in errors ) message = f"{message} for {fields}" else : message = data [ "message" ] raise InvalidField ( errors , message ) elif status_code >= 300 : exc_type = RedirectionException else : exc_type = HTTPException status_code_enum = http . HTTPStatus ( status_code ) args : Union [ Tuple [ http . HTTPStatus , str ] , Tuple [ http . HTTPStatus ] ] if message : args = status_code_enum , message else : args = status_code_enum , raise exc_type ( * args )
Decipher an HTTP response for a GitHub API request .
375
11
235,682
def format_url ( url : str , url_vars : Mapping [ str , Any ] ) -> str : url = urllib . parse . urljoin ( DOMAIN , url ) # Works even if 'url' is fully-qualified. expanded_url : str = uritemplate . expand ( url , var_dict = url_vars ) return expanded_url
Construct a URL for the GitHub API .
80
8
235,683
def from_http(cls, headers: Mapping[str, str], body: bytes, *, secret: Optional[str] = None) -> "Event":
    """Construct an event from HTTP headers and JSON body data.

    When a *secret* is supplied, the payload signature is validated;
    a signature without a secret (or vice versa) is rejected.

    Raises:
        ValidationFailure: on any signature/secret mismatch.
        BadRequest: if the content-type is not one we can decode.
    """
    if "x-hub-signature" in headers:
        if secret is None:
            raise ValidationFailure("secret not provided")
        validate_event(body, signature=headers["x-hub-signature"], secret=secret)
    elif secret is not None:
        raise ValidationFailure("signature is missing")
    try:
        data = _decode_body(headers["content-type"], body, strict=True)
    except (KeyError, ValueError) as exc:
        raise BadRequest(
            http.HTTPStatus(415),
            "expected a content-type of "
            "'application/json' or "
            "'application/x-www-form-urlencoded'",
        ) from exc
    return cls(data, event=headers["x-github-event"], delivery_id=headers["x-github-delivery"])
Construct an event from HTTP headers and JSON body data .
223
11
235,684
def from_http(cls, headers: Mapping[str, str]) -> Optional["RateLimit"]:
    """Gather rate limit information from HTTP headers.

    Returns ``None`` when any of the three ``x-ratelimit-*`` headers is
    absent (e.g. on responses that are not rate-limited).
    """
    try:
        details = (
            int(headers["x-ratelimit-limit"]),
            int(headers["x-ratelimit-remaining"]),
            float(headers["x-ratelimit-reset"]),
        )
    except KeyError:
        return None
    limit, remaining, reset_epoch = details
    return cls(limit=limit, remaining=remaining, reset_epoch=reset_epoch)
Gather rate limit information from HTTP headers .
111
9
235,685
def add(self, func: AsyncCallback, event_type: str, **data_detail: Any) -> None:
    """Register *func* as a callback for *event_type*.

    At most one ``key=value`` data detail may be given; with one, the
    callback only fires when the event payload's *key* equals *value*.

    Raises:
        TypeError: if more than one data detail is specified.
    """
    # Bug fix: the original assigned an unused empty tuple (`msg = ()`)
    # immediately before raising; that dead statement is removed.
    if len(data_detail) > 1:
        raise TypeError(
            "dispatching based on data details is only "
            "supported up to one level deep; "
            f"{len(data_detail)} levels specified"
        )
    elif not data_detail:
        # No detail: route purely on the event type.
        callbacks = self._shallow_routes.setdefault(event_type, [])
        callbacks.append(func)
    else:
        # Exactly one detail: nest the callback under event/key/value.
        data_key, data_value = data_detail.popitem()
        data_details = self._deep_routes.setdefault(event_type, {})
        specific_detail = data_details.setdefault(data_key, {})
        callbacks = specific_detail.setdefault(data_value, [])
        callbacks.append(func)
Add a new route .
199
5
235,686
async def _make_request(
    self,
    method: str,
    url: str,
    url_vars: Dict[str, str],
    data: Any,
    accept: str,
    jwt: Opt[str] = None,
    oauth_token: Opt[str] = None,
) -> Tuple[bytes, Opt[str]]:
    """Construct and make an HTTP request against the GitHub API.

    Selects the auth header (explicit jwt > explicit oauth_token >
    instance oauth_token), applies conditional-request caching (ETag /
    Last-Modified) for cacheable GETs, JSON-encodes non-empty bodies,
    and decrements the local rate-limit bookkeeping. Returns a
    ``(data, next_link)`` pair.

    Raises:
        ValueError: if both *oauth_token* and *jwt* are given.
    """
    if oauth_token is not None and jwt is not None:
        raise ValueError("Cannot pass both oauth_token and jwt.")
    filled_url = sansio.format_url(url, url_vars)
    if jwt is not None:
        request_headers = sansio.create_headers(self.requester, accept=accept, jwt=jwt)
    elif oauth_token is not None:
        request_headers = sansio.create_headers(self.requester, accept=accept, oauth_token=oauth_token)
    else:
        # fallback to using oauth_token
        request_headers = sansio.create_headers(self.requester, accept=accept, oauth_token=self.oauth_token)
    cached = cacheable = False
    # Can't use None as a "no body" sentinel as it's a legitimate JSON type.
    if data == b"":
        body = b""
        request_headers["content-length"] = "0"
        if method == "GET" and self._cache is not None:
            cacheable = True
            try:
                etag, last_modified, data, more = self._cache[filled_url]
                cached = True
            except KeyError:
                pass
            else:
                if etag is not None:
                    request_headers["if-none-match"] = etag
                if last_modified is not None:
                    request_headers["if-modified-since"] = last_modified
    else:
        charset = "utf-8"
        body = json.dumps(data).encode(charset)
        request_headers["content-type"] = f"application/json; charset={charset}"
        request_headers["content-length"] = str(len(body))
    if self.rate_limit is not None:
        self.rate_limit.remaining -= 1
    response = await self._request(method, filled_url, request_headers, body)
    # A 304 against a cached entry means the cached data/next-link stand.
    if not (response[0] == 304 and cached):
        data, self.rate_limit, more = sansio.decipher_response(*response)
        has_cache_details = "etag" in response[1] or "last-modified" in response[1]
        if self._cache is not None and cacheable and has_cache_details:
            etag = response[1].get("etag")
            last_modified = response[1].get("last-modified")
            self._cache[filled_url] = etag, last_modified, data, more
    return data, more
Construct and make an HTTP request .
629
7
235,687
async def getitem(
    self,
    url: str,
    url_vars: Dict[str, str] = {},
    *,
    accept: str = sansio.accept_format(),
    jwt: Opt[str] = None,
    oauth_token: Opt[str] = None,
) -> Any:
    """Send a GET request for a single item to the specified endpoint."""
    result, _ = await self._make_request(
        "GET", url, url_vars, b"", accept, jwt=jwt, oauth_token=oauth_token
    )
    return result
Send a GET request for a single item to the specified endpoint .
114
13
235,688
async def getiter(
    self,
    url: str,
    url_vars: Dict[str, str] = {},
    *,
    accept: str = sansio.accept_format(),
    jwt: Opt[str] = None,
    oauth_token: Opt[str] = None,
) -> AsyncGenerator[Any, None]:
    """Yield every item at the specified endpoint, following pagination."""
    data, more = await self._make_request(
        "GET", url, url_vars, b"", accept, jwt=jwt, oauth_token=oauth_token
    )
    if isinstance(data, dict) and "items" in data:
        # Search endpoints wrap the results in an "items" key.
        data = data["items"]
    for item in data:
        yield item
    if more:
        # `yield from` is not supported in coroutines.
        async for item in self.getiter(more, url_vars, accept=accept, jwt=jwt, oauth_token=oauth_token):
            yield item
Return an async iterable for all the items at a specified endpoint .
206
14
235,689
def quantize(self, image):
    """Map the image's pixels onto the palette — via a kd-tree when SciPy
    is available, falling back to a slower pure-Python version otherwise."""
    if get_cKDTree():
        return self.quantize_with_scipy(image)
    print('Scipy not available, falling back to slower version.')
    return self.quantize_without_scipy(image)
Use a kdtree to quickly find the closest palette colors for the pixels
65
15
235,690
def inxsearch(self, r, g, b):
    """Search for BGR values 0..255 and return the index of the palette
    entry with the smallest squared distance to (r, g, b)."""
    target = np.array([r, g, b])
    deltas = self.colormap[:, :3] - target
    squared = (deltas * deltas).sum(1)
    return np.argmin(squared)
Search for BGR values 0..255 and return the colour index.
65
12
235,691
def gen_filename(endpoint):
    """Generate an HDF5 filename from *endpoint* plus a timestamp.

    'localhost' endpoints are rewritten to the machine's short hostname
    so files from different hosts stay distinguishable; ':' and '/' are
    replaced to keep the name filesystem-safe.
    """
    stamp = datetime.now().strftime('%Y%m%d_%H%M%S%f')[:-4]
    base = endpoint.split('://', 1)[1]
    if base.startswith('localhost:'):
        base = gethostname().split('.')[0] + base[9:]
    safe = base.replace(':', '_').replace('/', '_')
    return '{}_{}.h5'.format(safe, stamp)
Generate a filename from endpoint with timestamp .
131
9
235,692
def dict_to_hdf5(dic, endpoint):
    """Dump *dic* to a freshly named HDF5 file and report the filename."""
    path = gen_filename(endpoint)
    with h5py.File(path, 'w') as h5file:
        walk_dict_to_hdf5(dic, h5file)
    print('dumped to', path)
Dump a dict to an HDF5 file .
64
11
235,693
def hdf5_to_dict(filepath, group='/'):
    """Load the content of an HDF5 file (or a group within it) into a dict.

    Raises:
        RuntimeError: if *filepath* is not a valid HDF5 file.
    """
    if not h5py.is_hdf5(filepath):
        raise RuntimeError(filepath, 'is not a valid HDF5 file.')
    with h5py.File(filepath, 'r') as handler:
        return walk_hdf5_to_dict(handler[group])
Load the content of an HDF5 file into a dict.
90
13
235,694
def print_one_train(client, verbosity=0):
    """Retrieve one train from *client*, print a per-source summary,
    and return the (data, metadata) pair (None on empty data)."""
    t0 = time()
    data, meta = client.next()
    t1 = time()
    if not data:
        print("Empty data")
        return
    train_id = list(meta.values())[0].get('timestamp.tid', 0)
    print("Train ID:", train_id, "--------------------------")
    delta = t1 - t0
    print('Data from {} sources, REQ-REP took {:.2f} ms'.format(len(data), delta))
    print()
    for i, (source, src_data) in enumerate(sorted(data.items()), start=1):
        src_metadata = meta.get(source, {})
        tid = src_metadata.get('timestamp.tid', 0)
        print("Source {}: {!r} @ {}".format(i, source, tid))
        try:
            ts = src_metadata['timestamp']
        except KeyError:
            print("No timestamp")
        else:
            dt = strftime('%Y-%m-%d %H:%M:%S', localtime(ts))
            delay = (t1 - ts) * 1000
            print('timestamp: {} ({}) | delay: {:.2f} ms'.format(dt, ts, delay))
        if verbosity < 1:
            print("- data:", sorted(src_data))
            print("- metadata:", sorted(src_metadata))
        else:
            print('data:')
            pretty_print(src_data, verbosity=verbosity - 1)
            if src_metadata:
                print('metadata:')
                pretty_print(src_metadata)
        print()
    return data, meta
Retrieve data for one train and print it .
399
10
235,695
def pretty_print(d, ind='', verbosity=0):
    """Pretty-print a (possibly nested) data dictionary from the bridge client."""
    assert isinstance(d, dict)
    for key, value in sorted(d.items()):
        str_base = '{} - [{}] {}'.format(ind, type(value).__name__, key)
        if isinstance(value, dict):
            # Mark nested dicts with '+' and recurse with a deeper indent.
            print(str_base.replace('-', '+', 1))
            pretty_print(value, ind=ind + '  ', verbosity=verbosity)
            continue
        elif isinstance(value, np.ndarray):
            node = '{}, {}, {}'.format(str_base, value.dtype, value.shape)
            if verbosity >= 2:
                node += '\n{}'.format(value)
        elif isinstance(value, Sequence):
            if value and isinstance(value, (list, tuple)):
                # Splice the element type into the bracketed type tag.
                itemtype = ' of ' + type(value[0]).__name__
                pos = str_base.find(']')
                str_base = str_base[:pos] + itemtype + str_base[pos:]
            node = '{}, {}'.format(str_base, value)
            if verbosity < 1 and len(node) > 80:
                node = node[:77] + '...'
        else:
            node = '{}, {}'.format(str_base, value)
        print(node)
Pretty print a data dictionary from the bridge client
312
9
235,696
def start_gen(port, ser='msgpack', version='2.2', detector='AGIPD', raw=False,
              nsources=1, datagen='random', *, debug=True):
    """Simulate a Karabo-bridge server.

    Binds a REP socket on *port* and emits one generated train per
    b'next' request until interrupted; any other request stops the loop.

    Raises:
        ValueError: for a serialisation format other than 'msgpack'.
    """
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.setsockopt(zmq.LINGER, 0)
    socket.bind('tcp://*:{}'.format(port))
    if ser != 'msgpack':
        raise ValueError("Unknown serialisation format %s" % ser)
    serialize = partial(msgpack.dumps, use_bin_type=True)
    det = Detector.getDetector(detector, raw=raw, gen=datagen)
    generator = generate(det, nsources)
    print('Simulated Karabo-bridge server started on:\ntcp://{}:{}'.format(
        uname().nodename, port))
    t_prev = time()
    n = 0
    try:
        while True:
            request = socket.recv()
            if request != b'next':
                print('wrong request')
                break
            train = next(generator)
            payload = containize(train, ser, serialize, version)
            socket.send_multipart(payload, copy=False)
            if debug:
                print('Server : emitted train:',
                      train[1][list(train[1].keys())[0]]['timestamp.tid'])
            n += 1
            if n % TIMING_INTERVAL == 0:
                t_now = time()
                print('Sent {} trains in {:.2f} seconds ({:.2f} Hz)'
                      ''.format(TIMING_INTERVAL, t_now - t_prev,
                                TIMING_INTERVAL / (t_now - t_prev)))
                t_prev = t_now
    except KeyboardInterrupt:
        print('\nStopped.')
    finally:
        socket.close()
        context.destroy()
Karabo bridge server simulation .
441
6
235,697
def next(self):
    """Request the next data container from the bridge.

    For REQ sockets, sends b'next' first (unless a request is already
    outstanding from a previous timed-out call).

    Raises:
        TimeoutError: if nothing arrives within the socket's RCVTIMEO.
    """
    if self._pattern == zmq.REQ and not self._recv_ready:
        self._socket.send(b'next')
        self._recv_ready = True
    try:
        reply = self._socket.recv_multipart(copy=False)
    except zmq.error.Again:
        raise TimeoutError(
            'No data received from {} in the last {} ms'.format(
                self._socket.getsockopt_string(zmq.LAST_ENDPOINT),
                self._socket.getsockopt(zmq.RCVTIMEO)))
    self._recv_ready = False
    return self._deserialize(reply)
Request next data container .
160
5
235,698
def zopen(filename, *args, **kwargs):
    """Open *filename* transparently whether it is bzipped (.bz2),
    gzipped (.gz/.z) or plain text, mimicking the builtin open().

    Accepts str or pathlib.Path filenames; extra args/kwargs are passed
    through to the underlying opener.
    """
    if Path is not None and isinstance(filename, Path):
        filename = str(filename)
    ext = os.path.splitext(filename)[1].upper()
    if ext == ".BZ2":
        if PY_VERSION[0] >= 3:
            return bz2.open(filename, *args, **kwargs)
        # Python 2's BZ2File has no text mode; strip any 't' flags.
        args = list(args)
        if len(args) > 0:
            args[0] = "".join([c for c in args[0] if c != "t"])
        if "mode" in kwargs:
            kwargs["mode"] = "".join([c for c in kwargs["mode"] if c != "t"])
        return bz2.BZ2File(filename, *args, **kwargs)
    if ext in (".GZ", ".Z"):
        return gzip.open(filename, *args, **kwargs)
    return io.open(filename, *args, **kwargs)
This function wraps around the bz2, gzip, and standard Python open functions to deal intelligently with bzipped, gzipped, or standard text files.
251
32
235,699
def reverse_readline(m_file, blk_size=4096, max_mem=4000000):
    """Yield the lines of an open file in reverse order.

    Small files (and gzip streams, which cannot seek backwards) are
    simply read fully and reversed in memory; larger seekable files are
    walked backwards block by block to bound memory use.
    """
    # Text streams give us str; binary streams need decoding below.
    is_text = isinstance(m_file, io.TextIOWrapper)
    try:
        file_size = os.path.getsize(m_file.name)
    except AttributeError:
        # Bz2 files do not have a name attribute; force the block path.
        file_size = max_mem + 1

    # If the whole file fits in our RAM budget, reverse it in memory.
    # GZip must use this path because it cannot seek negatively.
    if file_size < max_mem or isinstance(m_file, gzip.GzipFile):
        for line in reversed(m_file.readlines()):
            yield line.rstrip()
        return

    if isinstance(m_file, bz2.BZ2File):
        # Seeks are expensive on bz2; maximize the block size within
        # the allowed memory.
        blk_size = min(max_mem, file_size)

    buf = ""
    m_file.seek(0, 2)
    if is_text:
        lastchar = m_file.read(1)
    else:
        lastchar = m_file.read(1).decode("utf-8")
    trailing_newline = (lastchar == "\n")

    while 1:
        newline_pos = buf.rfind("\n")
        pos = m_file.tell()
        if newline_pos != -1:
            # A complete line sits at the end of the buffer.
            line = buf[newline_pos + 1:]
            buf = buf[:newline_pos]
            if pos or newline_pos or trailing_newline:
                line += "\n"
            yield line
        elif pos:
            # Buffer exhausted: read the previous block from the file.
            toread = min(blk_size, pos)
            m_file.seek(pos - toread, 0)
            if is_text:
                buf = m_file.read(toread) + buf
            else:
                buf = m_file.read(toread).decode("utf-8") + buf
            m_file.seek(pos - toread, 0)
            if pos == toread:
                buf = "\n" + buf
        else:
            # Start of file reached.
            return
Generator method to read a file line-by-line, but backwards. This allows one to efficiently get data at the end of a file.
519
29