idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
241,900
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox',
                     correction=False, p_adjust=None, sort=False):
    """Pairwise comparisons with the Wilcoxon signed-rank test.

    A non-parametric analogue of the paired t-test, run for every pair of
    groups found in ``group_col``.

    Parameters
    ----------
    a : array-like or DataFrame
        Input data; converted via ``__convert_to_df``.
    val_col, group_col : str, optional
        Names of the value and group-label columns.
    zero_method, correction :
        Forwarded unchanged to ``scipy.stats.wilcoxon``.
    p_adjust : str, optional
        Method name forwarded to ``multipletests`` for multiple-testing
        correction of the pairwise p-values.
    sort : bool
        When False, groups keep their first-seen order (via an ordered
        Categorical) instead of being sorted.

    Returns
    -------
    DataFrame
        Symmetric matrix of p-values with -1 on the diagonal.
    """
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve first-seen group order rather than lexical order.
        x[_group_col] = Categorical(x[_group_col],
                                    categories=x[_group_col].unique(),
                                    ordered=True)
    #x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_len = groups.size
    vs = np.zeros((x_len, x_len))
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    # One Wilcoxon signed-rank test per unordered pair; keep only the p-value.
    combs = it.combinations(range(x_len), 2)
    for i, j in combs:
        vs[i, j] = ss.wilcoxon(x.loc[x[_group_col] == groups[i], _val_col],
                               x.loc[x[_group_col] == groups[j], _val_col],
                               zero_method=zero_method,
                               correction=correction)[1]

    if p_adjust:
        # Adjust only the upper triangle, then mirror it below the diagonal.
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
Pairwise comparisons with Wilcoxon signed - rank test . It is a non - parametric version of the paired T - test for use with non - parametric ANOVA .
409
37
241,901
def shutdown_waits_for(coro, loop=None):
    """Prevent *coro* from being cancelled during the shutdown sequence.

    The given coroutine is wrapped in a proxy task that is registered in
    ``_DO_NOT_CANCEL_COROS``; its result (or exception) is relayed to the
    caller through a decoupling future.
    """
    loop = loop or get_event_loop()
    fut = loop.create_future()  # This future will connect coro and the caller.

    async def coro_proxy():
        """This function will await coro, but it will also send the result
        over the future.  Remember: the outside caller (of
        shutdown_waits_for) will be awaiting fut, NOT coro(), due to the
        decoupling.  However, when coro completes, we need to send its
        result over to the fut to make it look *as if* it was just coro
        running the whole time.  This whole thing is a teeny magic trick.
        """
        try:
            result = await coro
        except (CancelledError, Exception) as e:
            set_fut_done = partial(fut.set_exception, e)
        else:
            set_fut_done = partial(fut.set_result, result)

        if not fut.cancelled():
            set_fut_done()

    new_coro = coro_proxy()  # We'll taskify this one instead of coro.
    _DO_NOT_CANCEL_COROS.add(new_coro)  # The new task must not be cancelled.
    loop.create_task(new_coro)  # Make the task

    # Ok, so we *could* simply return fut.  Callers can await it as normal,
    # e.g.
    #
    # async def blah():
    #     x = await shutdown_waits_for(bleh())
    #
    # That will work fine.  However, callers may *also* want to detach the
    # call from the current execution context, e.g.
    #
    # async def blah():
    #     loop.create_task(shutdown_waits_for(bleh()))
    #
    # This will only work if shutdown_waits_for() returns a coroutine.
    # Therefore, we just make a new coroutine to wrap the `await fut` and
    # return that.  Then both things will work.
    #
    # (Side note: instead of callers using create_tasks, it would also work
    # if they used `asyncio.ensure_future()` instead, since that can work
    # with futures.  But I don't like ensure_future.)
    #
    # (Another side note: You don't even need `create_task()` or
    # `ensure_future()`...If you don't want a result, you can just call
    # `shutdown_waits_for()` as a flat function call, no await or anything,
    # and it should still work; unfortunately it causes a RuntimeWarning to
    # tell you that ``inner()`` was never awaited :/
    async def inner():
        return await fut

    return inner()
Prevent coro from being cancelled during the shutdown sequence .
594
12
241,902
def command(self, *args, **kwargs):
    """Convenience decorator that registers a function as a command.

    Supports both the bare form (``@command``) and the parameterized form
    (``@command(name=..., ...)``).
    """
    # Bare form: the decorated callable arrives as the single positional arg.
    if len(args) == 1 and isinstance(args[0], collections.Callable):
        return self._generate_command(args[0])

    # Parameterized form: hand back a decorator that carries args/kwargs.
    def decorator(func):
        return self._generate_command(func, *args, **kwargs)

    return decorator
Convenient decorator that simply creates the corresponding command parser.
83
8
241,903
def _generate_command(self, func, name=None, **kwargs):
    """Generates an argparse sub-command parser for the given func.

    Aliases are looked up in the storm config; each parameter of ``func``
    becomes a positional or ``--option`` argument depending on whether it
    has a default value.

    NOTE(review): relies on Python 2 era APIs (``izip_longest`` from
    itertools, ``inspect.getargspec`` removed in Python 3.11).
    ``additional_kwarg`` is assigned but never used; the loop variable
    ``kwargs`` shadows the function's own ``**kwargs`` parameter.
    """
    func_pointer = name or func.__name__
    storm_config = get_storm_config()
    aliases, additional_kwarg = None, None
    if 'aliases' in storm_config:
        for command, alias_list in six.iteritems(storm_config.get("aliases")):
            if func_pointer == command:
                aliases = alias_list
                break
    func_help = func.__doc__ and func.__doc__.strip()
    subparser = self.subparsers.add_parser(name or func.__name__,
                                           aliases=aliases,
                                           help=func_help)
    spec = inspect.getargspec(func)
    # Pair each argument with its default; args without defaults get a
    # _POSITIONAL sentinel (alignment done from the right).
    opts = reversed(list(izip_longest(reversed(spec.args or []),
                                      reversed(spec.defaults or []),
                                      fillvalue=self._POSITIONAL())))
    for k, v in opts:
        argopts = getattr(func, 'argopts', {})
        args, kwargs = argopts.get(k, ([], {}))
        args = list(args)
        is_positional = isinstance(v, self._POSITIONAL)
        options = [arg for arg in args if arg.startswith('-')]
        if isinstance(v, list):
            # List defaults become repeatable options.
            kwargs.update({'action': 'append', })
        if is_positional:
            if options:
                # Explicit option strings force a required named option.
                args = options
                kwargs.update({'required': True, 'dest': k})
            else:
                args = [k]
        else:
            args = options or ['--%s' % k]
            kwargs.update({'default': v, 'dest': k})
        arg = subparser.add_argument(*args, **kwargs)
    # Stash the handler so execute() can dispatch to it later.
    subparser.set_defaults(**{self._COMMAND_FLAG: func})
    return func
Generates a command parser for given func .
443
9
241,904
def execute(self, arg_list):
    """Parse ``arg_list`` and dispatch to the selected command handler.

    The handler was stashed under ``self._COMMAND_FLAG`` by the parser
    setup; every remaining parsed value is passed as a keyword argument.
    """
    namespace = self.parser.parse_args(arg_list)
    parsed = vars(namespace)
    handler = parsed.pop(self._COMMAND_FLAG)
    return handler(**parsed)
Main function to parse and dispatch commands by given arg_list
56
12
241,905
def add(name, connection_uri, id_file="", o=None, config=None):
    """Adds a new entry to sshconfig.

    Parameters:
        name: entry name; must not contain '@'.
        connection_uri: "[user@]host[:port]" string parsed via ``parse``.
        id_file: optional identity-file path.
        o: optional list of "key=value" ssh option strings.
        config: optional alternative config path for ``get_storm_instance``.

    Prints a success message, or prints the error and exits with status 1.
    """
    # BUGFIX: use None instead of a shared mutable default list.
    o = o if o is not None else []
    storm_ = get_storm_instance(config)
    try:
        # validate name
        if '@' in name:
            raise ValueError('invalid value: "@" cannot be used in name.')
        user, host, port = parse(connection_uri,
                                 user=get_default("user", storm_.defaults),
                                 port=get_default("port", storm_.defaults))
        storm_.add_entry(name, host, user, port, id_file, o)
        print(get_formatted_message(
            '{0} added to your ssh config. you can connect '
            'it by typing "ssh {0}".'.format(name), 'success'))
    except ValueError as error:
        print(get_formatted_message(error, 'error'), file=sys.stderr)
        sys.exit(1)
Adds a new entry to sshconfig .
212
8
241,906
def clone(name, clone_name, config=None):
    """Clone an existing entry in the sshconfig under a new name.

    Prints a success message, or prints the error and exits with status 1.
    """
    storm_ = get_storm_instance(config)
    try:
        # validate name
        if '@' in name:
            raise ValueError('invalid value: "@" cannot be used in name.')
        storm_.clone_entry(name, clone_name)
        success_text = ('{0} added to your ssh config. you can connect '
                        'it by typing "ssh {0}".'.format(clone_name))
        print(get_formatted_message(success_text, 'success'))
    except ValueError as error:
        print(get_formatted_message(error, 'error'), file=sys.stderr)
        sys.exit(1)
Clone an entry to the sshconfig .
151
9
241,907
def move(name, entry_name, config=None):
    """Rename an entry in the sshconfig (clone it, drop the original).

    Prints a success message, or prints the error and exits with status 1.
    """
    storm_ = get_storm_instance(config)
    try:
        if '@' in name:
            raise ValueError('invalid value: "@" cannot be used in name.')
        storm_.clone_entry(name, entry_name, keep_original=False)
        success_text = ('{0} moved in ssh config. you can '
                       'connect it by typing "ssh {0}".'.format(entry_name))
        print(get_formatted_message(success_text, 'success'))
    except ValueError as error:
        print(get_formatted_message(error, 'error'), file=sys.stderr)
        sys.exit(1)
Move an entry to the sshconfig .
153
8
241,908
def edit(name, connection_uri, id_file="", o=None, config=None):
    """Edits the related entry (or comma-separated entries) in ssh config.

    Parameters:
        name: entry name(s); a comma-separated value edits several entries.
        connection_uri: "[user@]host[:port]" string parsed via ``parse``.
        id_file: optional identity-file path.
        o: optional list of "key=value" ssh option strings.
        config: optional alternative config path for ``get_storm_instance``.

    Prints a success message, or prints the error and exits with status 1.
    """
    # BUGFIX: use None instead of a shared mutable default list.
    o = o if o is not None else []
    storm_ = get_storm_instance(config)
    try:
        if ',' in name:
            name = " ".join(name.split(","))
        user, host, port = parse(connection_uri,
                                 user=get_default("user", storm_.defaults),
                                 port=get_default("port", storm_.defaults))
        storm_.edit_entry(name, host, user, port, id_file, o)
        print(get_formatted_message(
            '"{0}" updated successfully.'.format(name), 'success'))
    except ValueError as error:
        print(get_formatted_message(error, 'error'), file=sys.stderr)
        sys.exit(1)
Edits the related entry in ssh config .
188
9
241,909
def update(name, connection_uri="", id_file="", o=None, config=None):
    """Update one or more entries (``name`` may match several entries).

    Parameters:
        name: entry name (or pattern understood by ``update_entry``).
        connection_uri: unused here; kept for interface compatibility.
        id_file: optional identity-file path.
        o: optional list of "key=value" ssh option strings.
        config: optional alternative config path for ``get_storm_instance``.

    Prints a success message, or prints the error and exits with status 1.
    """
    # BUGFIX: use None instead of a shared mutable default list.
    o = o if o is not None else []
    storm_ = get_storm_instance(config)
    settings = {}
    if id_file != "":
        settings['identityfile'] = id_file
    try:
        for option in o:
            # BUGFIX: split only on the first '=' so values may contain '=';
            # malformed options now raise inside the try and get the
            # formatted error + exit(1) instead of a bare traceback.
            k, v = option.split("=", 1)
            settings[k] = v
        storm_.update_entry(name, **settings)
        print(get_formatted_message(
            '"{0}" updated successfully.'.format(name), 'success'))
    except ValueError as error:
        print(get_formatted_message(error, 'error'), file=sys.stderr)
        sys.exit(1)
Enhanced version of the edit command featuring multiple edits using regular expressions to match entries
162
15
241,910
def delete(name, config=None):
    """Deletes a single host entry from the ssh config.

    Prints a success message, or prints the error and exits with status 1.
    """
    storm_ = get_storm_instance(config)
    try:
        storm_.delete_entry(name)
        done_msg = 'hostname "{0}" deleted successfully.'.format(name)
        print(get_formatted_message(done_msg, 'success'))
    except ValueError as error:
        print(get_formatted_message(error, 'error'), file=sys.stderr)
        sys.exit(1)
Deletes a single host .
97
6
241,911
def list(config=None):
    """Lists all hosts from ssh config.

    NOTE(review): this shadows the builtin ``list``, which is why the body
    must reach the real builtin through ``builtins.list``.
    """
    storm_ = get_storm_instance(config)
    try:
        result = colored('Listing entries:', 'white', attrs=["bold", ]) + "\n\n"
        result_stack = ""
        for host in storm_.list_entries(True):
            if host.get("type") == 'entry':
                if not host.get("host") == "*":
                    result += " {0} -> {1}@{2}:{3}".format(
                        colored(host["host"], 'green', attrs=["bold", ]),
                        host.get("options").get("user", get_default("user", storm_.defaults)),
                        host.get("options").get("hostname", "[hostname_not_specified]"),
                        host.get("options").get("port", get_default("port", storm_.defaults)))
                    # Append any non-standard options after the main line.
                    extra = False
                    for key, value in six.iteritems(host.get("options")):
                        if not key in ["user", "hostname", "port"]:
                            if not extra:
                                custom_options = colored('\n\t[custom options] ', 'white')
                                result += " {0}".format(custom_options)
                                extra = True
                            if isinstance(value, collections.Sequence):
                                if isinstance(value, builtins.list):
                                    value = ",".join(value)
                            result += "{0}={1} ".format(key, value)
                    if extra:
                        # Trim the trailing space left by the options loop.
                        result = result[0:-1]
                    result += "\n\n"
                else:
                    # The "*" entry holds general (global) ssh options.
                    result_stack = colored(" (*) General options: \n", "green", attrs=["bold", ])
                    for key, value in six.iteritems(host.get("options")):
                        if isinstance(value, type([])):
                            result_stack += "\t {0}: ".format(colored(key, "magenta"))
                            result_stack += ', '.join(value)
                            result_stack += "\n"
                        else:
                            result_stack += "\t {0}: {1}\n".format(
                                colored(key, "magenta"), value, )
                    result_stack = result_stack[0:-1] + "\n"
        # General options are printed once, after all entries.
        result += result_stack
        print(get_formatted_message(result, ""))
    except Exception as error:
        print(get_formatted_message(str(error), 'error'), file=sys.stderr)
        sys.exit(1)
Lists all hosts from ssh config .
591
8
241,912
def search(search_text, config=None):
    """Searches ssh config entries matching the given search text.

    Prints the matches (or 'no results found.'); on failure prints the
    error and exits with status 1.
    """
    storm_ = get_storm_instance(config)
    try:
        results = storm_.search_host(search_text)
        if len(results) == 0:
            print('no results found.')
        if len(results) > 0:
            header = 'Listing results for {0}:\n'.format(search_text)
            print(header + "".join(results))
    except Exception as error:
        print(get_formatted_message(str(error), 'error'), file=sys.stderr)
        sys.exit(1)
Searches entries by given search text .
132
9
241,913
def delete_all(config=None):
    """Deletes every host entry from the ssh config.

    Prints a success message, or prints the error and exits with status 1.
    """
    storm_instance = get_storm_instance(config)
    try:
        storm_instance.delete_all_entries()
        done_msg = get_formatted_message('all entries deleted.', 'success')
        print(done_msg)
    except Exception as error:
        print(get_formatted_message(str(error), 'error'), file=sys.stderr)
        sys.exit(1)
Deletes all hosts from ssh config .
92
8
241,914
def backup(target_file, config=None):
    """Backs up the main ssh configuration into ``target_file``.

    On failure prints the error and exits with status 1.
    """
    storm_instance = get_storm_instance(config)
    try:
        storm_instance.backup(target_file)
    except Exception as error:
        error_text = get_formatted_message(str(error), 'error')
        print(error_text, file=sys.stderr)
        sys.exit(1)
Backups the main ssh configuration into target file .
72
10
241,915
def web(port, debug=False, theme="modern", ssh_config=None):
    """Starts the web UI on the given port.

    The web module is imported lazily so its dependencies are only required
    when this command is actually used.
    """
    from storm import web as _web
    _web.run(port, debug, theme, ssh_config)
Starts the web UI .
44
6
241,916
def _strip_list_attributes(graph_):
    """Converts list-valued attributes to strings for all nodes and edges.

    Needed because some export formats cannot serialize list attributes.

    NOTE(review): Python 2 only (``iteritems``, ``unicode``) and relies on
    the pre-2.0 networkx ``graph.node`` / ``graph.edge`` dict API.
    """
    for n_ in graph_.nodes(data=True):
        for k, v in n_[1].iteritems():
            if type(v) is list:
                graph_.node[n_[0]][k] = unicode(v)
    for e_ in graph_.edges(data=True):
        for k, v in e_[2].iteritems():
            if type(v) is list:
                graph_.edge[e_[0]][e_[1]][k] = unicode(v)
    return graph_
Converts lists attributes to strings for all nodes and edges in G .
139
14
241,917
def _safe_type(value):
    """Converts Python type names to XGMML-safe type names.

    Returns 'string' for str/unicode, 'integer' for int, 'real' for float.

    Raises:
        TypeError: for any other type.  BUGFIX: previously ``dtype`` was
        never assigned for unsupported types, so the return statement
        raised an incidental NameError/UnboundLocalError.
    """
    dtype = None
    if type(value) is str:
        dtype = 'string'
    if type(value) is unicode:
        dtype = 'string'
    if type(value) is int:
        dtype = 'integer'
    if type(value) is float:
        dtype = 'real'
    if dtype is None:
        raise TypeError('unsupported attribute type: %r' % type(value))
    return dtype
Converts Python type names to XGMML - safe type names .
69
14
241,918
def read(path, corpus=True, index_by='wosid', streaming=False, parse_only=None,
         corpus_class=Corpus, **kwargs):
    """Parse one or more WoS field-tagged data files.

    ``path`` may be a single data file or a directory of ``*txt`` files.
    Returns a ``corpus_class`` instance when ``corpus`` is True, otherwise
    the raw list of parsed papers.

    Raises:
        ValueError: if ``path`` does not exist.

    NOTE(review): appends to the caller's ``parse_only`` list in place —
    repeated calls with the same list will accumulate index fields.
    """
    if not os.path.exists(path):
        raise ValueError('No such file or directory')

    # We need the primary index field in the parse results.
    if parse_only:
        parse_only.append(index_by)

    if streaming:
        return streaming_read(path, corpus=corpus, index_by=index_by,
                              parse_only=parse_only, **kwargs)

    if os.path.isdir(path):    # Directory containing 1+ WoS data files.
        papers = []
        for sname in os.listdir(path):
            if sname.endswith('txt') and not sname.startswith('.'):
                # Recurse per-file; corpus=False keeps the raw paper list.
                papers += read(os.path.join(path, sname), corpus=False,
                               parse_only=parse_only)
    else:    # A single data file.
        papers = WoSParser(path).parse(parse_only=parse_only)

    if corpus:
        return corpus_class(papers, index_by=index_by, **kwargs)
    return papers
Parse one or more WoS field - tagged data files .
271
13
241,919
def parse_author(self, value):
    """Attempts to split an author name into last and first(-initial) parts.

    Prefers "Last, First" (comma-separated); falls back to whitespace
    splitting.  Both parts are stripped of punctuation and uppercased.
    """
    tokens = tuple([t.upper().strip() for t in value.split(',')])
    if len(tokens) == 1:
        tokens = value.split(' ')
    if len(tokens) > 0:
        if len(tokens) > 1:
            aulast, auinit = tokens[0:2]    # Ignore JR, II, III, etc.
        else:
            aulast = tokens[0]
            auinit = ''
    else:
        # NOTE(review): unreachable — str.split always yields >= 1 token.
        aulast, auinit = tokens[0], ''
    aulast = _strip_punctuation(aulast).upper()
    auinit = _strip_punctuation(auinit).upper()
    return aulast, auinit
Attempts to split an author name into last and first parts .
164
12
241,920
def handle_CR(self, value):
    """Parses cited references (WoS ``CR`` field) into a citation entry.

    Extracts first-author name, publication date, journal, volume, start
    page, and DOI from the free-text reference.  Returns None when the
    value matches neither recognized reference pattern.
    """
    citation = self.entry_class()
    value = strip_tags(value)

    # First-author name and publication date.
    ptn = '([\w\s\W]+),\s([0-9]{4}),\s([\w\s]+)'
    ny_match = re.match(ptn, value, flags=re.U)
    nj_match = re.match('([\w\s\W]+),\s([\w\s]+)', value, flags=re.U)
    if ny_match is not None:
        name_raw, date, journal = ny_match.groups()
    elif nj_match is not None:
        name_raw, journal = nj_match.groups()
        date = None
    else:
        return

    # A leading 4-digit year means there is no author name.
    datematch = re.match('([0-9]{4})', value)
    if datematch:
        date = datematch.group(1)
        name_raw = None

    if name_raw:
        name_tokens = [t.replace('.', '') for t in name_raw.split(' ')]
        if len(name_tokens) > 4 or value.startswith('*'):
            # Probably not a person.
            proc = lambda x: _strip_punctuation(x)
            aulast = ' '.join([proc(n) for n in name_tokens]).upper()
            auinit = ''
        elif len(name_tokens) > 0:
            aulast = name_tokens[0].upper()
            proc = lambda x: _space_sep(_strip_punctuation(x))
            auinit = ' '.join([proc(n) for n in name_tokens[1:]]).upper()
        else:
            # NOTE(review): unreachable — str.split always yields >= 1 token.
            aulast = name_tokens[0].upper()
            auinit = ''
        setattr(citation, 'authors_init', [(aulast, auinit)])

    if date:
        date = int(date)
    setattr(citation, 'date', date)
    setattr(citation, 'journal', journal)

    # Volume.
    v_match = re.search('\,\s+V([0-9A-Za-z]+)', value)
    if v_match is not None:
        volume = v_match.group(1)
    else:
        volume = None
    setattr(citation, 'volume', volume)

    # Start page.
    p_match = re.search('\,\s+[Pp]([0-9A-Za-z]+)', value)
    if p_match is not None:
        page = p_match.group(1)
    else:
        page = None
    setattr(citation, 'pageStart', page)

    # DOI.
    doi_match = re.search('DOI\s(.*)', value)
    if doi_match is not None:
        doi = doi_match.group(1)
    else:
        doi = None
    setattr(citation, 'doi', doi)
    return citation
Parses cited references .
688
6
241,921
def postprocess_WC(self, entry):
    """Parse WC (Web of Science Categories) into an uppercased list.

    Multi-valued entries are joined before splitting on ';'.
    Py2: relies on the ``unicode`` builtin.
    """
    if type(entry.WC) not in [str, unicode]:
        WC = u' '.join([unicode(k) for k in entry.WC])
    else:
        WC = entry.WC
    entry.WC = [k.strip().upper() for k in WC.split(';')]
Parse WC keywords .
82
5
241,922
def postprocess_subject(self, entry):
    """Parse subject keywords into an uppercased list.

    Multi-valued entries are joined before splitting on ';'.
    Py2: relies on the ``unicode`` builtin.
    """
    if type(entry.subject) not in [str, unicode]:
        subject = u' '.join([unicode(k) for k in entry.subject])
    else:
        subject = entry.subject
    entry.subject = [k.strip().upper() for k in subject.split(';')]
Parse subject keywords .
82
5
241,923
def postprocess_authorKeywords(self, entry):
    """Parse author keywords into an uppercased list.

    Multi-valued entries are joined before splitting on ';'.
    Py2: relies on the ``unicode`` builtin.
    """
    if type(entry.authorKeywords) not in [str, unicode]:
        aK = u' '.join([unicode(k) for k in entry.authorKeywords])
    else:
        aK = entry.authorKeywords
    entry.authorKeywords = [k.strip().upper() for k in aK.split(';')]
Parse author keywords .
95
5
241,924
def postprocess_keywordsPlus(self, entry):
    """Parse WoS "Keyword Plus" keywords into an uppercased list.

    Non-string values are left untouched (unlike the sibling postprocess
    methods, there is no join fallback here).
    """
    if type(entry.keywordsPlus) in [str, unicode]:
        entry.keywordsPlus = [k.strip().upper()
                              for k in entry.keywordsPlus.split(';')]
Parse WoS Keyword Plus keywords .
59
9
241,925
def postprocess_funding(self, entry):
    """Separates funding agency from grant numbers.

    e.g. "NSF [123]" becomes ("NSF", "123"); sources without a bracketed
    grant keep ``None`` as the grant.  Non-string values are skipped.
    """
    if type(entry.funding) not in [str, unicode]:
        return
    sources = [fu.strip() for fu in entry.funding.split(';')]
    sources_processed = []
    for source in sources:
        m = re.search('(.*)?\s+\[(.+)\]', source)
        if m:
            agency, grant = m.groups()
        else:
            agency, grant = source, None
        sources_processed.append((agency, grant))
    entry.funding = sources_processed
Separates funding agency from grant numbers .
126
9
241,926
def postprocess_authors_full(self, entry):
    """Ensure that ``entry.authors_full`` is a list.

    The parser yields a bare value when only a single author was found;
    wrap it so downstream code can always iterate.  Uses ``isinstance``
    (idiomatic; also accepts list subclasses) instead of ``type(...) is``.
    """
    if not isinstance(entry.authors_full, list):
        entry.authors_full = [entry.authors_full]
If only a single author was found ensure that authors_full is nonetheless a list .
39
17
241,927
def postprocess_authors_init(self, entry):
    """Ensure that ``entry.authors_init`` is a list.

    The parser yields a bare value when only a single author was found;
    wrap it so downstream code can always iterate.  Uses ``isinstance``
    (idiomatic; also accepts list subclasses) instead of ``type(...) is``.
    """
    if not isinstance(entry.authors_init, list):
        entry.authors_init = [entry.authors_init]
If only a single author was found ensure that authors_init is nonetheless a list .
39
17
241,928
def postprocess_citedReferences(self, entry):
    """Ensure that ``entry.citedReferences`` is a list.

    The parser yields a bare value when only a single cited reference was
    found; wrap it so downstream code can always iterate.  Uses
    ``isinstance`` (idiomatic; also accepts list subclasses).
    """
    if not isinstance(entry.citedReferences, list):
        entry.citedReferences = [entry.citedReferences]
If only a single cited reference was found ensure that citedReferences is nonetheless a list .
36
17
241,929
def plot_burstness(corpus, B, **kwargs):
    """Generate a figure depicting burstness profiles for features.

    ``B`` maps a feature key to ``(years, states)``; one horizontal strip
    of year-rectangles is drawn per feature, shaded by burstness state.
    Py2 only (``iteritems``, ``xrange``).
    """
    try:
        import matplotlib.pyplot as plt
        import matplotlib.patches as mpatches
    except ImportError:
        raise RuntimeError('This method requires the package matplotlib.')

    color = kwargs.get('color', 'red')

    # Get width based on slices.
    years = sorted(corpus.indices['date'].keys())
    width = years[1] - years[0]
    height = 1.0

    fig = plt.figure(figsize=(10, len(B) / 4.))
    f = 1
    axes = {}
    for key, value in B.iteritems():
        x, y = value
        ax = fig.add_subplot(len(B), 1, f)
        f += 1
        ax.set_yticks([])
        ax.set_xbound(min(years), max(years) + 1)
        if not f == len(B) + 1:
            # Only show xticks on the bottom subplot.
            ax.set_xticklabels([])

        # Block out years until first occurrence of feature.
        rect = mpatches.Rectangle((min(years), 0),
                                  sorted(x)[0] - min(years),
                                  height, fill=True, linewidth=0.0)
        rect.set_facecolor('black')
        rect.set_alpha(0.3)
        ax.add_patch(rect)

        # Add a rectangle for each year, shaded according to burstness state.
        for d in xrange(min(x), max(x)):
            try:
                i = x.index(d)
            except ValueError:
                continue    # No state recorded for this year.
            xy = (d, 0.)
            state = y[i]
            rect = mpatches.Rectangle(xy, width, height,
                                      fill=True, linewidth=0.0)
            rect.set_facecolor(color)
            rect.set_alpha(state)
            ax.add_patch(rect)
        ax.set_ylabel(key, rotation=0,
                      horizontalalignment='right',
                      verticalalignment='center')
    plt.subplots_adjust(left=0.5)
    fig.tight_layout(h_pad=0.25)
    plt.show()
Generate a figure depicting burstness profiles for feature .
509
11
241,930
def simplify_multigraph(multigraph, time=False):
    """Condense parallel edges into single weighted edges.

    ``weight`` = number of parallel edges between the node pair.  When
    ``time`` is True, also records the earliest/latest 'date' attribute
    found on the parallel edges as 'start'/'end'.

    NOTE(review): uses the pre-2.0 networkx positional-attribute-dict API
    (``add_node(u, attrs)`` / ``add_edge(u, v, attrs)``).
    """
    graph = nx.Graph()

    for node in multigraph.nodes(data=True):
        u = node[0]
        node_attribs = node[1]
        graph.add_node(u, node_attribs)
        for v in multigraph[u]:
            edges = multigraph.get_edge_data(u, v)    # Dict.
            edge_attribs = {'weight': len(edges)}
            if time:
                # Look for a date in each edge.
                start = 3000
                end = 0
                found_date = False
                for edge in edges.values():
                    try:
                        # NOTE(review): found_date is set before the 'date'
                        # lookup, so it stays True even if every edge lacks
                        # a date — confirm whether that is intended.
                        found_date = True
                        if edge['date'] < start:
                            start = edge['date']
                        if edge['date'] > end:
                            end = edge['date']
                    except KeyError:    # No date to be found.
                        pass
                if found_date:    # If no date found, don't add start/end atts.
                    edge_attribs['start'] = start
                    edge_attribs['end'] = end
            graph.add_edge(u, v, edge_attribs)
    return graph
Simplifies a graph by condensing multiple edges between the same node pair into a single edge with a weight attribute equal to the number of edges .
258
30
241,931
def citation_count(papers, key='ayjid', verbose=False):
    """Generates citation counts for all of the papers cited by ``papers``.

    Returns a Counter mapping each cited paper's ``key`` value to the
    number of citing papers.  Py2 only (print statement, ``unicode``).
    """
    if verbose:
        print "Generating citation counts for " + unicode(len(papers)) + " papers..."
    counts = Counter()
    for P in papers:
        if P['citations'] is not None:
            for p in P['citations']:
                counts[p[key]] += 1
    return counts
Generates citation counts for all of the papers cited by papers .
90
13
241,932
def connected(G, method_name, **kwargs):
    """Deprecated pass-through to ``GraphCollection.analyze``.

    Runs the named ``networkx.connected`` analysis method on each graph in
    the collection.  Scheduled for removal in 0.8.
    """
    warnings.warn("To be removed in 0.8. Use GraphCollection.analyze instead.",
                  DeprecationWarning)
    method_path = ['connected', method_name]
    return G.analyze(method_path, **kwargs)
Performs analysis methods from networkx . connected on each graph in the collection .
64
16
241,933
def attachment_probability(G):
    """Calculates the observed attachment probability for each node at each
    time-step, based on the new edges observed in the *next* time-step
    (so edges gained at time t accrue to time t-1).

    Deprecated ("Removed in 0.8"); Py2 only (``iteritems``, dict
    ``keys()[-1]`` indexing).
    """
    warnings.warn("Removed in 0.8. Too domain-specific.")
    probs = {}
    G_ = None    # Graph from the previous iteration.
    k_ = None    # Key of the previous graph.
    for k, g in G.graphs.iteritems():
        new_edges = {}
        if G_ is not None:
            for n in g.nodes():
                try:
                    old_neighbors = set(G_[n].keys())
                    if len(old_neighbors) > 0:
                        new_neighbors = set(g[n].keys()) - old_neighbors
                        new_edges[n] = float(len(new_neighbors))
                    else:
                        new_edges[n] = 0.
                except KeyError:
                    # Node absent from the previous graph.
                    pass
            N = sum(new_edges.values())
            probs[k_] = {n: 0. for n in G_.nodes()}
            if N > 0.:
                # NOTE(review): iterates G.nodes() (the collection), not
                # g.nodes() (the current graph) — looks like a bug; confirm.
                for n in G.nodes():
                    try:
                        probs[k_][n] = new_edges[n] / N
                    except KeyError:
                        pass
            if probs[k_] is not None:
                networkx.set_node_attributes(G.graphs[k_],
                                             'attachment_probability',
                                             probs[k_])
        # NOTE(review): assigns the whole collection G, not the current
        # graph g — subsequent G_[n] lookups hit the collection; suspected
        # bug, confirm against GraphCollection semantics.
        G_ = G
        k_ = k

    # Handle last graph (no values).
    key = G.graphs.keys()[-1]
    zprobs = {n: 0. for n in G.graphs[key].nodes()}
    networkx.set_node_attributes(G.graphs[key], 'attachment_probability', zprobs)
    return probs
Calculates the observed attachment probability for each node at each time-step. Attachment probability is calculated based on the observed new edges in the next time-step. So if a node acquires new edges at time t, this will accrue to the node's attachment probability at time t-1. Thus, at a given time, one can ask whether degree and attachment probability are related.
374
77
241,934
def global_closeness_centrality(g, node=None, normalize=True):
    """Calculates global closeness centrality for one or all nodes.

    With ``node`` unset, recurses over every node and returns a dict;
    otherwise returns the scalar centrality of that node (sum of inverse
    shortest-path lengths, divided by graph size, optionally normalized by
    the per-component average shortest path length).

    NOTE(review): ``nx.connected_component_subgraphs`` was removed in
    networkx 2.4 — this requires an older networkx.
    """
    if not node:
        C = {}
        for node in g.nodes():
            C[node] = global_closeness_centrality(g, node,
                                                  normalize=normalize)
        return C

    values = nx.shortest_path_length(g, node).values()
    # Inverse path lengths; unreachable/self (pl == 0) are excluded.
    c = sum([1. / pl for pl in values if pl != 0.]) / len(g)

    if normalize:
        # Weight each component's inverse ASPL by its relative size.
        ac = 0
        for sg in nx.connected_component_subgraphs(g):
            if len(sg.nodes()) > 1:
                aspl = nx.average_shortest_path_length(sg)
                ac += (1. / aspl) * (float(len(sg)) / float(len(g)) ** 2)
        c = c / ac
    return c
Calculates global closeness centrality for one or all nodes in the network .
202
17
241,935
def ngrams(path, elem, ignore_hash=True):
    """Load N-grams from a JSTOR DfR dataset into a FeatureSet."""
    generator = GramGenerator(path, elem, ignore_hash=ignore_hash)
    features = {}
    for key, grams in generator:
        features[key] = Feature(grams)
    return FeatureSet(features)
Yields N - grams from a JSTOR DfR dataset .
55
16
241,936
def tokenize(ngrams, min_tf=2, min_df=2, min_len=3, apply_stoplist=False):
    """Builds a vocabulary and replaces words with vocab indices.

    Parameters:
        ngrams: mapping of doi -> iterable of (gram, count) pairs.
        min_tf, min_df, min_len: minimum term frequency, document
            frequency, and gram length; rarer/shorter grams are dropped.
        apply_stoplist: drop grams containing an NLTK stopword.

    Returns:
        (t_ngrams, vocab, token_tf): tokenized ngrams per doi, the
        index -> gram vocabulary, and per-index term frequencies.

    Py2 only (``iteritems``, ``unicode``); ``unidecode`` normalizes grams.
    """
    vocab = {}      # index -> gram
    vocab_ = {}     # gram -> index (reverse map)
    word_tf = Counter()
    word_df = Counter()
    token_tf = Counter()
    token_df = Counter()
    t_ngrams = {}

    # Get global word counts, first.
    for grams in ngrams.values():
        for g, c in grams:
            word_tf[g] += c
            word_df[g] += 1

    if apply_stoplist:
        stoplist = stopwords.words()

    # Now tokenize.
    for doi, grams in ngrams.iteritems():
        t_ngrams[doi] = []
        for g, c in grams:
            ignore = False

            # Ignore extremely rare words (probably garbage).
            if word_tf[g] < min_tf or word_df[g] < min_df or len(g) < min_len:
                ignore = True
            # Stoplist.
            elif apply_stoplist:
                for w in g.split():
                    if w in stoplist:
                        ignore = True

            if not ignore:
                # Coerce unicode to string.
                if type(g) is str:
                    g = unicode(g)
                g = unidecode(g)
                # BUGFIX: test membership against the reverse map (O(1))
                # instead of scanning vocab.values() on every token (O(V)),
                # which made tokenization quadratic in vocabulary size.
                if g not in vocab_:
                    i = len(vocab)
                    vocab[i] = g
                    vocab_[g] = i
                else:
                    i = vocab_[g]
                token_tf[i] += c
                token_df[i] += 1
                t_ngrams[doi].append((i, c))

    return t_ngrams, vocab, token_tf
Builds a vocabulary and replaces words with vocab indices .
372
12
241,937
def _handle_pagerange(pagerange):
    """Yields start and end pages (as unicode strings) from a DfR
    pagerange field, e.g. "pp. 12-24" -> (u'12', u'24').

    Falls back to (u'0', u'0') when the field doesn't match.
    Py2: relies on the ``unicode`` builtin.
    """
    try:
        pr = re.compile("pp\.\s([0-9]+)\-([0-9]+)")
        start, end = re.findall(pr, pagerange)[0]
    except IndexError:
        # No match found; default to page 0.
        start = end = 0
    return unicode(start), unicode(end)
Yields start and end pages from DfR pagerange field .
79
16
241,938
def _handle_authors(authors):
    """Yields aulast and auinit lists from the value of an authors node.

    Accepts a list of names or a single (str/unicode) name.  Malformed
    names are silently skipped.  Py2 only (``unicode``); ``unidecode``
    normalizes names to ASCII.

    Raises:
        ValueError: if ``authors`` is neither a list nor a string.
    """
    aulast = []
    auinit = []
    if type(authors) is list:
        for author in authors:
            if type(author) is str:
                author = unicode(author)
            author = unidecode(author)
            try:
                l, i = _handle_author(author)
                aulast.append(l)
                auinit.append(i)
            except ValueError:
                # Skip malformed author names.
                pass
    elif type(authors) is str or type(authors) is unicode:
        if type(authors) is str:
            authors = unicode(authors)
        author = unidecode(authors)
        try:
            l, i = _handle_author(author)
            aulast.append(l)
            auinit.append(i)
        except ValueError:
            pass
    else:
        raise ValueError("authors must be a list or a string")
    return aulast, auinit
Yields aulast and auinit lists from value of authors node .
186
16
241,939
def _handle_author ( author ) : lname = author . split ( ' ' ) try : auinit = lname [ 0 ] [ 0 ] final = lname [ - 1 ] . upper ( ) if final in [ 'JR.' , 'III' ] : aulast = lname [ - 2 ] . upper ( ) + " " + final . strip ( "." ) else : aulast = final except IndexError : raise ValueError ( "malformed author name" ) return aulast , auinit
Yields aulast and auinit from an author's full name.
111
16
241,940
def _get(self, i):
    """Retrieve data for the ith file in the dataset.

    Returns the doi only when ``self.K`` is set, the grams only when
    ``self.V`` is set, otherwise ``(doi, grams)``.
    """
    with open(os.path.join(self.path, self.elem, self.files[i]), 'r') as f:
        # JSTOR hasn't always produced valid XML: escape bare ampersands.
        contents = re.sub('(&)(?!amp;)', lambda match: '&amp;', f.read())
    root = ET.fromstring(contents)
    doi = root.attrib['id']
    if self.K:    # Keys only.
        return doi
    grams = []
    for gram in root.findall(self.elem_xml):
        text = unidecode(unicode(gram.text.strip()))
        # Optionally skip grams containing '#' (hashed/garbage tokens).
        if (not self.ignore_hash or '#' not in list(text)):
            c = (text, number(gram.attrib['weight']))
            grams.append(c)
    if self.V:    # Values only.
        return grams
    return doi, grams
Retrieve data for the ith file in the dataset .
210
12
241,941
def _generate_corpus(self):
    """Write the corpus to disk in MALLET-friendly form, then import it."""
    target = self.temp + 'mallet'
    doc_path, meta_path = write_documents(self.corpus, target,
                                          self.featureset_name,
                                          ['date', 'title'])
    self.corpus_path = doc_path
    self.metapath = meta_path
    self._export_corpus()
Writes a corpus to disk amenable to MALLET topic modeling .
75
15
241,942
def _export_corpus(self):
    """Calls MALLET's ``import-file`` method to build the model input file.

    Raises IOError if the MALLET binary is missing, RuntimeError if the
    subprocess exits non-zero.  NOTE(review): the local ``exit`` shadows
    the builtin of the same name.
    """
    # bin/mallet import-file --input /Users/erickpeirson/mycorpus_docs.txt
    #     --output mytopic-input.mallet --keep-sequence --remove-stopwords
    if not os.path.exists(self.mallet_bin):
        raise IOError("MALLET path invalid or non-existent.")
    self.input_path = os.path.join(self.temp, "input.mallet")
    exit = subprocess.call([self.mallet_bin,
                            'import-file',
                            '--input', self.corpus_path,
                            '--output', self.input_path,
                            '--keep-sequence',      # Required for LDA.
                            '--remove-stopwords'])  # Probably redundant.
    if exit != 0:
        msg = "MALLET import-file failed with exit code {0}.".format(exit)
        raise RuntimeError(msg)
Calls MALLET s import - file method .
211
11
241,943
def run(self, **kwargs):
    """Calls MALLET's ``train-topics`` method and tracks its progress.

    Streams MALLET's stderr to collect per-iteration log-likelihood values
    (``self.ll``) and print a progress percentage; calls ``self.load()``
    when training completes.  Py2 only (``unicode``, print statement).

    Raises IOError if the MALLET binary is missing; AttributeError if
    ``Z`` (topic count) or ``max_iter`` has not been set.
    """
    #$ bin/mallet train-topics --input mytopic-input.mallet
    #> --num-topics 100
    #> --output-doc-topics /Users/erickpeirson/doc_top
    #> --word-topic-counts-file /Users/erickpeirson/word_top
    #> --output-topic-keys /Users/erickpeirson/topic_keys
    if not os.path.exists(self.mallet_bin):
        raise IOError("MALLET path invalid or non-existent.")
    for attr in ['Z', 'max_iter']:
        if not hasattr(self, attr):
            raise AttributeError('Please set {0}'.format(attr))

    self.ll = []
    self.num_iters = 0
    logger.debug('run() with k={0} for {1} iterations'.format(self.Z,
                                                              self.max_iter))
    prog = re.compile(u'\<([^\)]+)\>')
    ll_prog = re.compile(r'(\d+)')
    p = subprocess.Popen([self.mallet_bin, 'train-topics',
                          '--input', self.input_path,
                          '--num-topics', unicode(self.Z),
                          '--num-iterations', unicode(self.max_iter),
                          '--output-doc-topics', self.dt,
                          '--word-topic-counts-file', self.wt,
                          '--output-model', self.om],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)

    # Handle output of MALLET in real time.
    while p.poll() is None:
        l = p.stderr.readline()

        # Keep track of LL/topic.
        try:
            this_ll = float(re.findall(u'([-+]\d+\.\d+)', l)[0])
            self.ll.append(this_ll)
        except IndexError:    # Not every line will match.
            pass

        # Keep track of modeling progress.
        try:
            this_iter = float(prog.match(l).groups()[0])
            progress = int(100. * this_iter / self.max_iter)
            print 'Modeling progress: {0}%.\r'.format(progress),
        except AttributeError:    # Not every line will match.
            pass

    self.num_iters += self.max_iter
    self.load()
Calls MALLET s train - topic method .
580
11
241,944
def topics_in(self, d, topn=5):
    """List the top ``topn`` topics in document ``d``."""
    document_topics = self.theta.features[d]
    return document_topics.top(topn)
List the top topn topics in document d .
31
10
241,945
def list_topic(self, k, Nwords=10):
    """List the top ``Nwords`` words (with weights) for topic ``k``."""
    top_words = self.phi.features[k].top(Nwords)
    return [(self.vocabulary[w], p) for w, p in top_words]
List the top topn words for topic k .
48
10
241,946
def list_topics(self, Nwords=10):
    """List the top ``Nwords`` words for each topic, as (k, words) pairs.

    Py2: uses ``xrange``.
    """
    return [(k, self.list_topic(k, Nwords)) for k in xrange(len(self.phi))]
List the top Nwords words for each topic .
46
10
241,947
def print_topics(self, Nwords=10):
    """Print the top ``Nwords`` words for each topic, one topic per line.

    Py2: uses ``unicode``.  Only the words (not weights) are printed.
    """
    print('Topic\tTop %i words' % Nwords)
    for k, words in self.list_topics(Nwords):
        # words is a list of (word, weight) pairs; zip(*...) splits them.
        print(unicode(k).ljust(3) + '\t' + ' '.join(list(zip(*words))[0]))
Print the top Nwords words for each topic .
82
10
241,948
def topic_over_time(self, k, mode='counts', slice_kwargs={}):
    """Calculate the representation of topic ``k`` in the corpus over time.

    Delegates to ``corpus.feature_distribution`` over the 'topics'
    featureset, forwarding ``slice_kwargs``.
    """
    distribution_args = dict(slice_kwargs)
    return self.corpus.feature_distribution('topics', k, mode=mode,
                                            **distribution_args)
Calculate the representation of topic k in the corpus over time .
55
14
241,949
def distribution ( self , * * slice_kwargs ) : values = [ ] keys = [ ] for key , size in self . slice ( count_only = True , * * slice_kwargs ) : values . append ( size ) keys . append ( key ) return keys , values
Calculates the number of papers in each slice as defined by slice_kwargs .
60
18
241,950
def feature_distribution ( self , featureset_name , feature , mode = 'counts' , * * slice_kwargs ) : values = [ ] keys = [ ] fset = self . features [ featureset_name ] for key , papers in self . slice ( subcorpus = False , * * slice_kwargs ) : allfeatures = [ v for v in chain ( * [ fset . features [ self . _generate_index ( p ) ] for p in papers if self . _generate_index ( p ) in fset . features ] ) ] if len ( allfeatures ) < 1 : keys . append ( key ) values . append ( 0. ) continue count = 0. for elem , v in allfeatures : if elem != feature : continue if mode == 'counts' : count += v else : count += 1. values . append ( count ) keys . append ( key ) return keys , values
Calculates the distribution of a feature across slices of the corpus .
201
14
241,951
def top_features ( self , featureset_name , topn = 20 , by = 'counts' , perslice = False , slice_kwargs = { } ) : if perslice : return [ ( k , subcorpus . features [ featureset_name ] . top ( topn , by = by ) ) for k , subcorpus in self . slice ( * * slice_kwargs ) ] return self . features [ featureset_name ] . top ( topn , by = by )
Retrieves the top topn most numerous features in the corpus .
112
14
241,952
def feature_burstness ( corpus , featureset_name , feature , k = 5 , normalize = True , s = 1.1 , gamma = 1. , * * slice_kwargs ) : if featureset_name not in corpus . features : corpus . index_feature ( featureset_name ) if 'date' not in corpus . indices : corpus . index ( 'date' ) # Get time-intervals between occurrences. dates = [ min ( corpus . indices [ 'date' ] . keys ( ) ) - 1 ] # Pad start. X_ = [ 1. ] years , values = corpus . feature_distribution ( featureset_name , feature ) for year , N in izip ( years , values ) : if N == 0 : continue if N > 1 : if year == dates [ - 1 ] + 1 : for n in xrange ( int ( N ) ) : X_ . append ( 1. / N ) dates . append ( year ) else : X_ . append ( float ( year - dates [ - 1 ] ) ) dates . append ( year ) for n in xrange ( int ( N ) - 1 ) : X_ . append ( 1. / ( N - 1 ) ) dates . append ( year ) else : X_ . append ( float ( year - dates [ - 1 ] ) ) dates . append ( year ) # Get optimum state sequence. st = _forward ( map ( lambda x : x * 100 , X_ ) , s = s , gamma = gamma , k = k ) # Bin by date. A = defaultdict ( list ) for i in xrange ( len ( X_ ) ) : A [ dates [ i ] ] . append ( st [ i ] ) # Normalize. if normalize : A = { key : mean ( values ) / k for key , values in A . items ( ) } else : A = { key : mean ( values ) for key , values in A . items ( ) } D = sorted ( A . keys ( ) ) return D [ 1 : ] , [ A [ d ] for d in D [ 1 : ] ]
Estimate burstness profile for a feature over the date axis .
447
13
241,953
def cocitation ( corpus , min_weight = 1 , edge_attrs = [ 'ayjid' , 'date' ] , * * kwargs ) : return cooccurrence ( corpus , 'citations' , min_weight = min_weight , edge_attrs = edge_attrs , * * kwargs )
Generate a cocitation network .
72
7
241,954
def context_chunk ( self , context , j ) : N_chunks = len ( self . contexts [ context ] ) start = self . contexts [ context ] [ j ] if j == N_chunks - 1 : end = len ( self ) else : end = self . contexts [ context ] [ j + 1 ] return [ self [ i ] for i in xrange ( start , end ) ]
Retrieve the tokens in the j th chunk of context context .
86
13
241,955
def add_context ( self , name , indices , level = None ) : self . _validate_context ( ( name , indices ) ) if level is None : level = len ( self . contexts_ranked ) self . contexts_ranked . insert ( level , name ) self . contexts [ name ] = indices
Add a new context level to the hierarchy .
65
9
241,956
def index ( self , name , graph ) : nodes = graph . nodes ( ) # Index new nodes. new_nodes = list ( set ( nodes ) - set ( self . node_index . values ( ) ) ) start = max ( len ( self . node_index ) - 1 , max ( self . node_index . keys ( ) ) ) for i in xrange ( start , start + len ( new_nodes ) ) : n = new_nodes . pop ( ) self . node_index [ i ] , self . node_lookup [ n ] = n , i self . graphs_containing [ n ] . append ( name ) # Relabel nodes in `graph`. new_labels = { n : self . node_lookup [ n ] for n in nodes } indexed_graph = nx . relabel . relabel_nodes ( graph , new_labels , copy = True ) return indexed_graph
Index any new nodes in graph and relabel the nodes in graph using the index .
200
17
241,957
def terms ( model , threshold = 0.01 , * * kwargs ) : select = lambda f , v , c , dc : v > threshold graph = cooccurrence ( model . phi , filter = select , * * kwargs ) # Only include labels for terms that are actually in the graph. label_map = { k : v for k , v in model . vocabulary . items ( ) if k in graph . nodes ( ) } graph . name = '' return networkx . relabel_nodes ( graph , label_map )
Two terms are coupled if the posterior probability for both terms is greather than threshold for the same topic .
116
21
241,958
def topic_coupling ( model , threshold = None , * * kwargs ) : if not threshold : threshold = 3. / model . Z select = lambda f , v , c , dc : v > threshold graph = coupling ( model . corpus , 'topics' , filter = select , * * kwargs ) graph . name = '' return graph
Two papers are coupled if they both contain a shared topic above a threshold .
76
15
241,959
def kl_divergence ( V_a , V_b ) : # Find shared features. Ndiff = _shared_features ( V_a , V_b ) # aprob and bprob should each sum to 1.0 aprob = map ( lambda v : float ( v ) / sum ( V_a ) , V_a ) bprob = map ( lambda v : float ( v ) / sum ( V_b ) , V_b ) # Smooth according to Bigi 2003. aprob , bprob = _smooth ( aprob , bprob , Ndiff ) return sum ( map ( lambda a , b : ( a - b ) * log ( a / b ) , aprob , bprob ) )
Calculate Kullback - Leibler distance .
166
12
241,960
def _shared_features ( adense , bdense ) : a_indices = set ( nonzero ( adense ) ) b_indices = set ( nonzero ( bdense ) ) shared = list ( a_indices & b_indices ) diff = list ( a_indices - b_indices ) Ndiff = len ( diff ) return Ndiff
Number of features in adense that are also in bdense .
81
14
241,961
def cooccurrence ( corpus_or_featureset , featureset_name = None , min_weight = 1 , edge_attrs = [ 'ayjid' , 'date' ] , filter = None ) : if not filter : filter = lambda f , v , c , dc : dc >= min_weight featureset = _get_featureset ( corpus_or_featureset , featureset_name ) if type ( corpus_or_featureset ) in [ Corpus , StreamingCorpus ] : attributes = { i : { a : corpus_or_featureset . indices_lookup [ i ] [ a ] for a in edge_attrs } for i in corpus_or_featureset . indexed_papers . keys ( ) } c = lambda f : featureset . count ( f ) # Overall count. dc = lambda f : featureset . documentCount ( f ) # Document count. attributes = { } # select applies filter to the elements in a (Structured)Feature. The # iteration behavior of Feature and StructuredFeature are different, as is # the manner in which the count for an element in each (Structured)Feature. if type ( featureset ) is FeatureSet : select = lambda feature : [ f for f , v in feature if filter ( f , v , c ( f ) , dc ( f ) ) ] elif type ( featureset ) is StructuredFeatureSet : select = lambda feature : [ f for f in feature if filter ( f , feature . count ( f ) , c ( f ) , dc ( f ) ) ] pairs = Counter ( ) eattrs = defaultdict ( dict ) nattrs = defaultdict ( dict ) nset = set ( ) for paper , feature in featureset . iteritems ( ) : if len ( feature ) == 0 : continue selected = select ( feature ) nset |= set ( selected ) for combo in combinations ( selected , 2 ) : combo = tuple ( sorted ( combo ) ) pairs [ combo ] += 1 if paper in attributes : eattrs [ combo ] = attributes [ paper ] # Generate node attributes. for n in list ( nset ) : nattrs [ n ] [ 'count' ] = featureset . count ( n ) nattrs [ n ] [ 'documentCount' ] = featureset . documentCount ( n ) return _generate_graph ( nx . Graph , pairs , edge_attrs = eattrs , node_attrs = nattrs , min_weight = min_weight )
A network of feature elements linked by their joint occurrence in papers .
532
13
241,962
def coupling ( corpus_or_featureset , featureset_name = None , min_weight = 1 , filter = lambda f , v , c , dc : True , node_attrs = [ ] ) : featureset = _get_featureset ( corpus_or_featureset , featureset_name ) c = lambda f : featureset . count ( f ) # Overall count. dc = lambda f : featureset . documentCount ( f ) # Document count. f = lambda elem : featureset . index [ elem ] v = lambda p , f : featureset . features [ p ] . value ( f ) select = lambda p , elem : filter ( f ( elem ) , v ( p , f ( elem ) ) , c ( f ( elem ) ) , dc ( f ( elem ) ) ) pairs = defaultdict ( list ) for elem , papers in featureset . with_feature . iteritems ( ) : selected = [ p for p in papers if select ( p , elem ) ] for combo in combinations ( selected , 2 ) : combo = tuple ( sorted ( combo ) ) pairs [ combo ] . append ( featureset . index [ elem ] ) graph = nx . Graph ( ) for combo , features in pairs . iteritems ( ) : count = len ( features ) if count >= min_weight : graph . add_edge ( combo [ 0 ] , combo [ 1 ] , features = features , weight = count ) # Add node attributes. for attr in node_attrs : for node in graph . nodes ( ) : value = '' if node in corpus_or_featureset : paper = corpus_or_featureset [ node ] if hasattr ( paper , attr ) : value = getattr ( paper , attr ) if value is None : value = '' elif callable ( value ) : value = value ( ) graph . node [ node ] [ attr ] = value return graph
A network of papers linked by their joint posession of features .
413
13
241,963
def multipartite ( corpus , featureset_names , min_weight = 1 , filters = { } ) : pairs = Counter ( ) node_type = { corpus . _generate_index ( p ) : { 'type' : 'paper' } for p in corpus . papers } for featureset_name in featureset_names : ftypes = { } featureset = _get_featureset ( corpus , featureset_name ) for paper , feature in featureset . iteritems ( ) : if featureset_name in filters : if not filters [ featureset_name ] ( featureset , feature ) : continue if len ( feature ) < 1 : continue for f in list ( zip ( * feature ) ) [ 0 ] : ftypes [ f ] = { 'type' : featureset_name } pairs [ ( paper , f ) ] += 1 node_type . update ( ftypes ) return _generate_graph ( nx . DiGraph , pairs , node_attrs = node_type , min_weight = min_weight )
A network of papers and one or more featuresets .
222
11
241,964
def _strip_punctuation ( s ) : if type ( s ) is str and not PYTHON_3 : # Bytestring (default in Python 2.x). return s . translate ( string . maketrans ( "" , "" ) , string . punctuation ) else : # Unicode string (default in Python 3.x). translate_table = dict ( ( ord ( char ) , u'' ) for char in u'!"#%\'()*+,-./:;<=>?@[\]^_`{|}~' ) return s . translate ( translate_table )
Removes all punctuation characters from a string .
130
10
241,965
def overlap ( listA , listB ) : if ( listA is None ) or ( listB is None ) : return [ ] else : return list ( set ( listA ) & set ( listB ) )
Return list of objects shared by listA listB .
45
11
241,966
def subdict ( super_dict , keys ) : sub_dict = { } valid_keys = super_dict . keys ( ) for key in keys : if key in valid_keys : sub_dict [ key ] = super_dict [ key ] return sub_dict
Returns a subset of the super_dict with the specified keys .
57
13
241,967
def concat_list ( listA , listB , delim = ' ' ) : # Lists must be of equal length. if len ( listA ) != len ( listB ) : raise IndexError ( 'Input lists are not parallel.' ) # Concatenate lists. listC = [ ] for i in xrange ( len ( listA ) ) : app = listA [ i ] + delim + listB [ i ] listC . append ( app ) return listC
Concatenate list elements pair - wise with the delim character Returns the concatenated list Raises index error if lists are not parallel
101
28
241,968
def strip_non_ascii ( s ) : stripped = ( c for c in s if 0 < ord ( c ) < 127 ) clean_string = u'' . join ( stripped ) return clean_string
Returns the string without non - ASCII characters .
45
9
241,969
def dict_from_node ( node , recursive = False ) : dict = { } for snode in node : if len ( snode ) > 0 : if recursive : # Will drill down until len(snode) <= 0. value = dict_from_node ( snode , True ) else : value = len ( snode ) elif snode . text is not None : value = snode . text else : value = u'' if snode . tag in dict . keys ( ) : # If there are multiple subelements # with the same tag, then the value # of the element should be a list # rather than a dict. if type ( dict [ snode . tag ] ) is list : # If a list has already been # started, just append to # it. dict [ snode . tag ] . append ( value ) else : dict [ snode . tag ] = [ dict [ snode . tag ] , value ] else : dict [ snode . tag ] = value # Default behavior. return dict
Converts ElementTree node to a dictionary .
216
9
241,970
def feed ( self , data ) : try : self . rawdata = self . rawdata + data except TypeError : data = unicode ( data ) self . rawdata = self . rawdata + data self . goahead ( 0 )
added this check as sometimes we are getting the data in integer format instead of string
50
16
241,971
def serializePaper ( self ) : pid = tethnedao . getMaxPaperID ( ) papers_details = [ ] for paper in self . corpus : pid = pid + 1 paper_key = getattr ( paper , Serialize . paper_source_map [ self . source ] ) self . paperIdMap [ paper_key ] = pid paper_data = { "model" : "django-tethne.paper" , "pk" : self . paperIdMap [ paper_key ] , "fields" : { "paper_id" : paper_key , "corpus" : self . corpus_id , "pub_date" : getattr ( paper , 'date' , '' ) , "volume" : getattr ( paper , 'volume' , '' ) , "title" : getattr ( paper , 'title' , '' ) , "abstract" : getattr ( paper , 'abstract' , '' ) , } } papers_details . append ( paper_data ) return papers_details
This method creates a fixture for the django - tethne_paper model .
221
17
241,972
def serializeCitation ( self ) : citation_details = [ ] citation_id = tethnedao . getMaxCitationID ( ) for citation in self . corpus . features [ 'citations' ] . index . values ( ) : date_match = re . search ( r'(\d+)' , citation ) if date_match is not None : date = date_match . group ( 1 ) if date_match is None : date_match = re . search ( r"NONE" , citation ) date = date_match . group ( ) first_author = citation . replace ( '_' , ' ' ) . split ( date ) [ 0 ] . rstrip ( ) journal = citation . replace ( '_' , ' ' ) . split ( date ) [ 1 ] . lstrip ( ) citation_key = citation if citation_key not in self . citationIdMap : citation_id += 1 self . citationIdMap [ citation_key ] = citation_id citation_data = { "model" : "django-tethne.citation" , "pk" : citation_id , "fields" : { "literal" : citation , "journal" : journal , "first_author" : first_author , "date" : date } } citation_details . append ( citation_data ) return citation_details
This method creates a fixture for the django - tethne_citation model .
288
18
241,973
def serializeInstitution ( self ) : institution_data = [ ] institution_instance_data = [ ] affiliation_data = [ ] affiliation_id = tethnedao . getMaxAffiliationID ( ) institution_id = tethnedao . getMaxInstitutionID ( ) institution_instance_id = tethnedao . getMaxInstitutionInstanceID ( ) for paper in self . corpus : if hasattr ( paper , 'authorAddress' ) : paper_key = getattr ( paper , Serialize . paper_source_map [ self . source ] ) if type ( paper . authorAddress ) is unicode : institution_id += 1 institution_instance_id += 1 institute_literal , authors = SerializeUtility . get_auth_inst ( paper . authorAddress ) institute_row , institute_instance_row = self . get_details_from_inst_literal ( institute_literal , institution_id , institution_instance_id , paper_key ) if institute_row : institution_data . append ( institute_row ) institution_instance_data . append ( institute_instance_row ) if authors : for author in authors : affiliation_id += 1 affiliation_row = self . get_affiliation_details ( author , affiliation_id , institute_literal ) affiliation_data . append ( affiliation_row ) elif type ( paper . authorAddress ) is list : for address in paper . authorAddress : institution_id += 1 institution_instance_id += 1 institute_literal , authors = SerializeUtility . get_auth_inst ( address ) institute_row , institute_instance_row = self . get_details_from_inst_literal ( institute_literal , institution_id , institution_instance_id , paper_key ) if institute_row : institution_data . append ( institute_row ) institution_instance_data . append ( institute_instance_row ) if authors is None : authors = prevAuthors for author in authors : affiliation_id += 1 affiliation_row = self . get_affiliation_details ( author , affiliation_id , institute_literal ) affiliation_data . append ( affiliation_row ) prevAuthors = authors return institution_data , institution_instance_data , affiliation_data
This method creates a fixture for the django - tethne_citation_institution model .
483
21
241,974
def get_details_from_inst_literal ( self , institute_literal , institution_id , institution_instance_id , paper_key ) : institute_details = institute_literal . split ( ',' ) institute_name = institute_details [ 0 ] country = institute_details [ len ( institute_details ) - 1 ] . lstrip ( ) . replace ( '.' , '' ) institute_row = None zipcode = "" state = "" city = "" if 'USA' in country : temp = country if ( len ( temp . split ( ) ) ) == 3 : country = temp . split ( ) [ 2 ] zipcode = temp . split ( ) [ 1 ] state = temp . split ( ) [ 0 ] elif ( len ( temp . split ( ) ) ) == 2 : country = temp . split ( ) [ 1 ] state = temp . split ( ) [ 0 ] city = institute_details [ len ( institute_details ) - 2 ] . lstrip ( ) addressline1 = "" for i in range ( 1 , len ( institute_details ) - 1 , 1 ) : if i != len ( institute_details ) - 2 : addressline1 = addressline1 + institute_details [ i ] + ',' else : addressline1 = addressline1 + institute_details [ i ] if institute_literal not in self . instituteIdMap : self . instituteIdMap [ institute_literal ] = institution_id institute_row = { "model" : "django-tethne.institution" , "pk" : institution_id , "fields" : { "institute_name" : institute_name , "addressLine1" : addressline1 , "country" : country , "zip" : zipcode , "state" : state , "city" : city } } department = "" if re . search ( 'Dept([^,]*),' , institute_literal ) is not None : department = re . search ( 'Dept([^,]*),' , institute_literal ) . group ( ) . replace ( ',' , '' ) institute_instance_row = { "model" : "django-tethne.institution_instance" , "pk" : institution_instance_id , "fields" : { "institution" : self . instituteIdMap [ institute_literal ] , "literal" : institute_literal , "institute_name" : institute_name , "addressLine1" : addressline1 , "country" : country , "paper" : self . paperIdMap [ paper_key ] , "department" : department , "zip" : zipcode , "state" : state , "city" : city } } return institute_row , institute_instance_row
This method parses the institute literal to get the following 1 . Department naame 2 . Country 3 . University name 4 . ZIP STATE AND CITY ( Only if the country is USA . For other countries the standard may vary . So parsing these values becomes very difficult . However the complete address can be found in the column AddressLine1
599
66
241,975
def get_affiliation_details ( self , value , affiliation_id , institute_literal ) : tokens = tuple ( [ t . upper ( ) . strip ( ) for t in value . split ( ',' ) ] ) if len ( tokens ) == 1 : tokens = value . split ( ) if len ( tokens ) > 0 : if len ( tokens ) > 1 : aulast , auinit = tokens [ 0 : 2 ] else : aulast = tokens [ 0 ] auinit = '' else : aulast , auinit = tokens [ 0 ] , '' aulast = _strip_punctuation ( aulast ) . upper ( ) auinit = _strip_punctuation ( auinit ) . upper ( ) author_key = auinit + aulast affiliation_row = { "model" : "django-tethne.affiliation" , "pk" : affiliation_id , "fields" : { "author" : self . authorIdMap [ author_key ] , "institution" : self . instituteIdMap [ institute_literal ] } } return affiliation_row
This method is used to map the Affiliation between an author and Institution .
239
15
241,976
def start ( self ) : while not self . is_start ( self . current_tag ) : self . next ( ) self . new_entry ( )
Find the first data entry and prepare to parse .
33
10
241,977
def handle ( self , tag , data ) : if self . is_end ( tag ) : self . postprocess_entry ( ) if self . is_start ( tag ) : self . new_entry ( ) if not data or not tag : return if getattr ( self , 'parse_only' , None ) and tag not in self . parse_only : return # TODO: revisit encoding here. if isinstance ( data , unicode ) : data = unicodedata . normalize ( 'NFKD' , data ) #.encode('utf-8','ignore') handler = self . _get_handler ( tag ) if handler is not None : data = handler ( data ) if tag in self . tags : # Rename the field. tag = self . tags [ tag ] # Multiline fields are represented as lists of values. if hasattr ( self . data [ - 1 ] , tag ) : value = getattr ( self . data [ - 1 ] , tag ) if tag in self . concat_fields : value = ' ' . join ( [ value , unicode ( data ) ] ) elif type ( value ) is list : value . append ( data ) elif value not in [ None , '' ] : value = [ value , data ] else : value = data setattr ( self . data [ - 1 ] , tag , value ) self . fields . add ( tag )
Process a single line of data and store the result .
298
11
241,978
def open ( self ) : if not os . path . exists ( self . path ) : raise IOError ( "No such path: {0}" . format ( self . path ) ) with open ( self . path , "rb" ) as f : msg = f . read ( ) result = chardet . detect ( msg ) self . buffer = codecs . open ( self . path , "rb" , encoding = result [ 'encoding' ] ) self . at_eof = False
Open the data file .
105
5
241,979
def next ( self ) : line = self . buffer . readline ( ) while line == '\n' : # Skip forward to the next line with content. line = self . buffer . readline ( ) if line == '' : # End of file. self . at_eof = True return None , None match = re . match ( '([A-Z]{2}|[C][1])\W(.*)' , line ) if match is not None : self . current_tag , data = match . groups ( ) else : self . current_tag = self . last_tag data = line . strip ( ) return self . current_tag , _cast ( data )
Get the next line of data .
146
7
241,980
def coauthors ( corpus , min_weight = 1 , edge_attrs = [ 'ayjid' , 'date' ] , * * kwargs ) : return cooccurrence ( corpus , 'authors' , min_weight = min_weight , edge_attrs = edge_attrs , * * kwargs )
A graph describing joint authorship in corpus .
71
9
241,981
def extract_text ( fpath ) : with codecs . open ( fpath , 'r' ) as f : # Determine the encoding of the file. document = f . read ( ) encoding = chardet . detect ( document ) [ 'encoding' ] document = document . decode ( encoding ) tokens = [ ] sentences = [ ] i = 0 for sentence in nltk . tokenize . sent_tokenize ( document ) : sentences . append ( i ) for word in nltk . tokenize . word_tokenize ( sentence ) : tokens . append ( word ) i += 1 contexts = [ ( 'sentence' , sentences ) ] return StructuredFeature ( tokens , contexts )
Extracts structured text content from a plain - text file at fpath .
148
16
241,982
def extract_pdf ( fpath ) : with codecs . open ( fpath , 'r' ) as f : # Determine the encoding of the file. document = slate . PDF ( f ) encoding = chardet . detect ( document [ 0 ] ) tokens = [ ] pages = [ ] sentences = [ ] tokenizer = nltk . tokenize . TextTilingTokenizer ( ) i = 0 for page in document : pages . append ( i ) # Decode using the correct encoding. page = page . decode ( encoding [ 'encoding' ] ) for sentence in nltk . tokenize . sent_tokenize ( page ) : sentences . append ( i ) for word in nltk . tokenize . word_tokenize ( sentence ) : if len ( word ) > 15 : words = nltk . tokenize . word_tokenize ( _infer_spaces ( word ) ) if mean ( [ len ( w ) for w in words ] ) > 2 : for w in words : tokens . append ( w ) i += 1 continue tokens . append ( word ) i += 1 contexts = [ ( 'page' , pages ) , ( 'sentence' , sentences ) ] return StructuredFeature ( tokens , contexts )
Extracts structured text content from a PDF at fpath .
264
13
241,983
def read ( path , corpus = True , index_by = 'uri' , follow_links = False , * * kwargs ) : # TODO: is there a case where `from_dir` would make sense? parser = ZoteroParser ( path , index_by = index_by , follow_links = follow_links ) papers = parser . parse ( ) if corpus : c = Corpus ( papers , index_by = index_by , * * kwargs ) if c . duplicate_papers : warnings . warn ( "Duplicate papers detected. Use the 'duplicate_papers' attribute of the corpus to get the list" , UserWarning ) for fset_name , fset_values in parser . full_text . iteritems ( ) : c . features [ fset_name ] = StructuredFeatureSet ( fset_values ) return c return papers
Read bibliographic data from Zotero RDF .
188
11
241,984
def handle_date ( self , value ) : try : return iso8601 . parse_date ( unicode ( value ) ) . year except iso8601 . ParseError : for datefmt in ( "%B %d, %Y" , "%Y-%m" , "%Y-%m-%d" , "%m/%d/%Y" ) : try : # TODO: remove str coercion. return datetime . strptime ( unicode ( value ) , datefmt ) . date ( ) . year except ValueError : pass
Attempt to coerced date to ISO8601 .
120
9
241,985
def postprocess_link ( self , entry ) : if not self . follow_links : return if type ( entry . link ) is not list : entry . link = [ entry . link ] for link in list ( entry . link ) : if not os . path . exists ( link ) : continue mime_type = magic . from_file ( link , mime = True ) if mime_type == 'application/pdf' : structuredfeature = extract_pdf ( link ) elif mime_type == 'text/plain' : structuredfeature = extract_text ( link ) else : structuredfeature = None if not structuredfeature : continue fset_name = mime_type . split ( '/' ) [ - 1 ] + '_text' if not fset_name in self . full_text : self . full_text [ fset_name ] = { } if hasattr ( self , 'index_by' ) : ident = getattr ( entry , self . index_by ) if type ( ident ) is list : ident = ident [ 0 ] else : # If `index_by` is not set, use `uri` by default. ident = entry . uri self . full_text [ fset_name ] [ ident ] = structuredfeature
Attempt to load full - text content from resource .
269
10
241,986
def webpush ( subscription_info , data = None , vapid_private_key = None , vapid_claims = None , content_encoding = "aes128gcm" , curl = False , timeout = None , ttl = 0 ) : vapid_headers = None if vapid_claims : if not vapid_claims . get ( 'aud' ) : url = urlparse ( subscription_info . get ( 'endpoint' ) ) aud = "{}://{}" . format ( url . scheme , url . netloc ) vapid_claims [ 'aud' ] = aud if not vapid_claims . get ( 'exp' ) : # encryption lives for 12 hours vapid_claims [ 'exp' ] = int ( time . time ( ) ) + ( 12 * 60 * 60 ) if not vapid_private_key : raise WebPushException ( "VAPID dict missing 'private_key'" ) if isinstance ( vapid_private_key , Vapid ) : vv = vapid_private_key elif os . path . isfile ( vapid_private_key ) : # Presume that key from file is handled correctly by # py_vapid. vv = Vapid . from_file ( private_key_file = vapid_private_key ) # pragma no cover else : vv = Vapid . from_string ( private_key = vapid_private_key ) vapid_headers = vv . sign ( vapid_claims ) response = WebPusher ( subscription_info ) . send ( data , vapid_headers , ttl = ttl , content_encoding = content_encoding , curl = curl , timeout = timeout , ) if not curl and response . status_code > 202 : raise WebPushException ( "Push failed: {} {}" . format ( response . status_code , response . reason ) , response = response ) return response
One call solution to endcode and send data to the endpoint contained in subscription_info using optional VAPID auth headers .
424
25
241,987
def encode ( self , data , content_encoding = "aes128gcm" ) : # Salt is a random 16 byte array. if not data : return if not self . auth_key or not self . receiver_key : raise WebPushException ( "No keys specified in subscription info" ) salt = None if content_encoding not in self . valid_encodings : raise WebPushException ( "Invalid content encoding specified. " "Select from " + json . dumps ( self . valid_encodings ) ) if content_encoding == "aesgcm" : salt = os . urandom ( 16 ) # The server key is an ephemeral ECDH key used only for this # transaction server_key = ec . generate_private_key ( ec . SECP256R1 , default_backend ( ) ) crypto_key = server_key . public_key ( ) . public_bytes ( encoding = serialization . Encoding . X962 , format = serialization . PublicFormat . UncompressedPoint ) if isinstance ( data , six . string_types ) : data = bytes ( data . encode ( 'utf8' ) ) if content_encoding == "aes128gcm" : encrypted = http_ece . encrypt ( data , salt = salt , private_key = server_key , dh = self . receiver_key , auth_secret = self . auth_key , version = content_encoding ) reply = CaseInsensitiveDict ( { 'body' : encrypted } ) else : crypto_key = base64 . urlsafe_b64encode ( crypto_key ) . strip ( b'=' ) encrypted = http_ece . encrypt ( data , salt = salt , private_key = server_key , keyid = crypto_key . decode ( ) , dh = self . receiver_key , auth_secret = self . auth_key , version = content_encoding ) reply = CaseInsensitiveDict ( { 'crypto_key' : crypto_key , 'body' : encrypted , } ) if salt : reply [ 'salt' ] = base64 . urlsafe_b64encode ( salt ) . strip ( b'=' ) return reply
Encrypt the data .
478
5
241,988
def send ( self , data = None , headers = None , ttl = 0 , gcm_key = None , reg_id = None , content_encoding = "aes128gcm" , curl = False , timeout = None ) : # Encode the data. if headers is None : headers = dict ( ) encoded = { } headers = CaseInsensitiveDict ( headers ) if data : encoded = self . encode ( data , content_encoding ) if "crypto_key" in encoded : # Append the p256dh to the end of any existing crypto-key crypto_key = headers . get ( "crypto-key" , "" ) if crypto_key : # due to some confusion by a push service provider, we # should use ';' instead of ',' to append the headers. # see # https://github.com/webpush-wg/webpush-encryption/issues/6 crypto_key += ';' crypto_key += ( "dh=" + encoded [ "crypto_key" ] . decode ( 'utf8' ) ) headers . update ( { 'crypto-key' : crypto_key } ) if "salt" in encoded : headers . update ( { 'encryption' : "salt=" + encoded [ 'salt' ] . decode ( 'utf8' ) } ) headers . update ( { 'content-encoding' : content_encoding , } ) if gcm_key : # guess if it is a legacy GCM project key or actual FCM key # gcm keys are all about 40 chars (use 100 for confidence), # fcm keys are 153-175 chars if len ( gcm_key ) < 100 : endpoint = 'https://android.googleapis.com/gcm/send' else : endpoint = 'https://fcm.googleapis.com/fcm/send' reg_ids = [ ] if not reg_id : reg_id = self . subscription_info [ 'endpoint' ] . rsplit ( '/' , 1 ) [ - 1 ] reg_ids . append ( reg_id ) gcm_data = dict ( ) gcm_data [ 'registration_ids' ] = reg_ids if data : gcm_data [ 'raw_data' ] = base64 . b64encode ( encoded . get ( 'body' ) ) . decode ( 'utf8' ) gcm_data [ 'time_to_live' ] = int ( headers [ 'ttl' ] if 'ttl' in headers else ttl ) encoded_data = json . dumps ( gcm_data ) headers . update ( { 'Authorization' : 'key=' + gcm_key , 'Content-Type' : 'application/json' , } ) else : encoded_data = encoded . get ( 'body' ) endpoint = self . 
subscription_info [ 'endpoint' ] if 'ttl' not in headers or ttl : headers [ 'ttl' ] = str ( ttl or 0 ) # Additionally useful headers: # Authorization / Crypto-Key (VAPID headers) if curl : return self . as_curl ( endpoint , encoded_data , headers ) return self . requests_method . post ( endpoint , data = encoded_data , headers = headers , timeout = timeout )
Encode and send the data to the Push Service .
713
11
241,989
def calendarplot(data, how='sum', yearlabels=True, yearascending=True,
                 yearlabel_kws=None, subplot_kws=None, gridspec_kws=None,
                 fig_kws=None, **kwargs):
    """Plot a timeseries as a calendar heatmap, one subplot per year.

    Delegates the per-year drawing to ``yearplot`` and returns the figure
    together with the flat array of per-year axes.
    """
    yearlabel_kws = yearlabel_kws or {}
    subplot_kws = subplot_kws or {}
    gridspec_kws = gridspec_kws or {}
    fig_kws = fig_kws or {}

    years = np.unique(data.index.year)
    if not yearascending:
        years = years[::-1]

    fig, axes = plt.subplots(nrows=len(years), ncols=1, squeeze=False,
                             subplot_kw=subplot_kws,
                             gridspec_kw=gridspec_kws, **fig_kws)
    # Single column of subplots -> take the first (only) column as a 1-D view.
    axes = axes[:, 0]

    # Resample by day exactly once up front (optimization); each yearplot
    # call below then runs with how=None so it does not resample again.
    if how is None:
        by_day = data
    else:
        by_day = (data.resample('D').agg(how) if _pandas_18
                  else data.resample('D', how=how))

    label_style = dict(fontsize=32,
                       color=kwargs.get('fillcolor', 'whitesmoke'),
                       fontweight='bold', fontname='Arial', ha='center')
    label_style.update(yearlabel_kws)

    widest = 0
    for year, ax in zip(years, axes):
        yearplot(by_day, year=year, how=None, ax=ax, **kwargs)
        widest = max(widest, ax.get_xlim()[1])
        if yearlabels:
            ax.set_ylabel(str(year), **label_style)

    # A leap year may span 54 weeks (e.g., 2012); give every row the same
    # width so the years line up.
    for ax in axes:
        ax.set_xlim(0, widest)

    # Make the axes look good.
    plt.tight_layout()
    return fig, axes
Plot a timeseries as a calendar heatmap.
515
10
241,990
def geosgeometry_str_to_struct(value):
    """Parse a geosgeometry string into a dict with 'srid', 'x' and 'y'.

    Returns None when the module-level ``geos_ptrn`` regex does not match.
    """
    match = geos_ptrn.match(value)
    if match is None:
        return None
    srid, x, y = match.group(1), match.group(2), match.group(3)
    return {'srid': srid, 'x': x, 'y': y}
Parses a geosgeometry string into a struct.
70
12
241,991
def get_env(name, default=None):
    """Return the environment variable ``name``.

    Falls back to ``default`` when the variable is unset and a non-None
    default was given; otherwise raises ImproperlyConfigured.
    """
    # Sentinel lets us distinguish "unset" from an empty-string value.
    _missing = object()
    value = os.environ.get(name, _missing)
    if value is not _missing:
        return value
    if default is not None:
        return default
    raise ImproperlyConfigured("Set the {} env variable".format(name))
Get the environment variable or the given default; raise an exception if neither is available.
63
7
241,992
def user_defined_symbols(self):
    """Return the set of symbols added to symtable after construction.

    Symbols recorded in ``self.no_deepcopy`` at construction time are
    excluded from the result.
    """
    current = set(self.symtable)
    baseline = set(self.no_deepcopy)
    return current - baseline
Return a set of symbols that have been added to symtable after construction.
72
15
241,993
def unimplemented(self, node):
    """Report an AST node type that has no handler implemented."""
    node_type = node.__class__.__name__
    self.raise_exception(node, exc=NotImplementedError,
                         msg="'%s' not supported" % node_type)
Unimplemented nodes.
49
6
241,994
def raise_exception(self, node, exc=None, msg='', expr=None, lineno=None):
    """Record an error for the failing AST node and raise it.

    Appends an ExceptionHolder to ``self.error``, injects a Break node as
    the interpreter's interrupt, updates ``self.error_msg``, and finally
    raises ``exc`` — defaulting to the first recorded error's exception
    class, or RuntimeError when that lookup fails.
    """
    if self.error is None:
        self.error = []
    if expr is None:
        expr = self.expr
    # For follow-on errors (not at Module level), coerce msg to a string.
    if len(self.error) > 0 and not isinstance(node, ast.Module):
        msg = '%s' % msg
    err = ExceptionHolder(node, exc=exc, msg=msg, expr=expr, lineno=lineno)
    # Interrupt further evaluation: handlers check self._interrupt.
    # NOTE(review): presumably the Break node short-circuits loops — confirm
    # against the loop handlers elsewhere in this class.
    self._interrupt = ast.Break()
    self.error.append(err)
    if self.error_msg is None:
        self.error_msg = "at expr='%s'" % (self.expr)
    elif len(msg) > 0:
        self.error_msg = msg
    if exc is None:
        # Fall back to the first recorded error's exception class.
        try:
            exc = self.error[0].exc
        except:  # NOTE(review): bare except — also swallows KeyboardInterrupt
            exc = RuntimeError
    raise exc(self.error_msg)
Add an exception.
201
4
241,995
def run(self, node, expr=None, lineno=None, with_raise=True):
    """Execute a parsed AST node (or parse-and-run a source string).

    Dispatches to the ``on_<nodetype>`` handler registered in
    ``self.node_handlers``. Returns the handler's result, or None when
    there is nothing to run or an error has already been recorded.
    Enforces the ``self.max_time`` wall-clock budget.
    """
    # Note: keep the 'node is None' test: internal code here may run
    # run(None) and expect a None in return.
    if time.time() - self.start_time > self.max_time:
        raise RuntimeError(ERR_MAX_TIME.format(self.max_time))
    out = None
    # Stop evaluating as soon as any error has been recorded.
    if len(self.error) > 0:
        return out
    if node is None:
        return out
    # Accept raw source text as well as pre-parsed AST nodes.
    if isinstance(node, str):
        node = self.parse(node)
    if lineno is not None:
        self.lineno = lineno
    if expr is not None:
        self.expr = expr
    # get handler for this node:
    # on_xxx with handle nodes of type 'xxx', etc
    try:
        handler = self.node_handlers[node.__class__.__name__.lower()]
    except KeyError:
        return self.unimplemented(node)
    # run the handler: this will likely generate
    # recursive calls into this run method.
    try:
        ret = handler(node)
        # Materialize enumerate objects so callers get a concrete list.
        if isinstance(ret, enumerate):
            ret = list(ret)
        return ret
    except:  # NOTE(review): bare except — deliberately routes every failure
        if with_raise:
            self.raise_exception(node, expr=expr)
Execute the parsed AST representation of an expression.
276
9
241,996
def eval(self, expr, lineno=0, show_errors=True):
    """Evaluate a single statement.

    Parses ``expr`` and runs it, resetting the error list and the
    wall-clock timer first. With ``show_errors`` true (default), errors
    are printed to ``self.err_writer`` and None is returned; otherwise
    the recorded exception class is raised.
    """
    self.lineno = lineno
    self.error = []
    self.start_time = time.time()
    try:
        node = self.parse(expr)
    except:  # parse failure: report or re-raise per show_errors
        errmsg = exc_info()[1]
        if len(self.error) > 0:
            # Prefer the interpreter's own recorded error text.
            errmsg = "\n".join(self.error[0].get_error())
        if not show_errors:
            try:
                exc = self.error[0].exc
            except:
                exc = RuntimeError
            raise exc(errmsg)
        print(errmsg, file=self.err_writer)
        return
    try:
        return self.run(node, expr=expr, lineno=lineno)
    except:  # runtime failure: same reporting policy as the parse branch
        errmsg = exc_info()[1]
        if len(self.error) > 0:
            errmsg = "\n".join(self.error[0].get_error())
        if not show_errors:
            try:
                exc = self.error[0].exc
            except:
                exc = RuntimeError
            raise exc(errmsg)
        print(errmsg, file=self.err_writer)
        return
Evaluate a single statement.
250
7
241,997
def on_module(self, node):    # ():('body',)
    """Module node: run each body statement, return the last result."""
    result = None
    for statement in node.body:
        result = self.run(statement)
    return result
Module def.
39
3
241,998
def on_assert(self, node):    # ('test', 'msg')
    """Assert statement: raise AssertionError when the test is falsy."""
    if self.run(node.test):
        return True
    # raise_exception raises, so execution never continues past it.
    self.raise_exception(node, exc=AssertionError, msg=node.msg)
    return True
Assert statement.
52
4
241,999
def on_name(self, node):    # ('id', 'ctx')
    """Name node: resolve ``node.id`` against the symbol table.

    For Del (and, on interpreters that still have it, Param) contexts the
    bare name string is returned. Otherwise the bound value is returned,
    and an undefined name raises NameError via ``raise_exception``.
    """
    ctx = node.ctx.__class__
    # Bug fix: ast.Param no longer exists in modern Python (removed in
    # 3.12), so referencing it directly raises AttributeError. Build the
    # context tuple defensively; behavior on older interpreters is
    # unchanged.
    name_only_ctxs = tuple(
        c for c in (getattr(ast, 'Param', None), ast.Del) if c is not None
    )
    if ctx in name_only_ctxs:
        return str(node.id)
    if node.id in self.symtable:
        return self.symtable[node.id]
    msg = "name '%s' is not defined" % node.id
    self.raise_exception(node, exc=NameError, msg=msg)
Name node.
107
3