idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
18,300
def readGraph(edgeList, nodeList=None, directed=False, idKey='ID', eSource='From', eDest='To'):
    """Read the CSV files _edgeList_ and (optionally) _nodeList_ into a networkx graph.

    Node rows are keyed by _idKey_; edge rows use the _eSource_/_eDest_ columns
    as endpoints. Any remaining columns become node/edge attributes.
    Returns the assembled `networkx` Graph (or DiGraph when _directed_).
    """
    progArgs = (0, "Starting to reading graphs")
    # Progress bar is a no-op unless verbose mode is on.
    progKwargs = {'dummy': not metaknowledge.VERBOSE_MODE}
    with _ProgressBar(*progArgs, **progKwargs) as PBar:
        grph = nx.DiGraph() if directed else nx.Graph()
        if nodeList:
            PBar.updateVal(0, "Reading " + nodeList)
            nodeHandle = open(os.path.expanduser(os.path.abspath(nodeList)))
            for row in csv.DictReader(nodeHandle):
                nodeID = row[idKey]
                del row[idKey]
                if row:
                    grph.add_node(nodeID, **row)
                else:
                    grph.add_node(nodeID)
            nodeHandle.close()
        PBar.updateVal(.25, "Reading " + edgeList)
        edgeHandle = open(os.path.expanduser(os.path.abspath(edgeList)))
        for row in csv.DictReader(edgeHandle):
            source = row[eSource]
            dest = row[eDest]
            del row[eSource]
            del row[eDest]
            if row:
                grph.add_edge(source, dest, **row)
            else:
                grph.add_edge(source, dest)
        PBar.finish("{} nodes and {} edges found".format(len(grph.nodes()), len(grph.edges())))
        edgeHandle.close()
    return grph
Reads the files given by _edgeList_ and _nodeList_ and creates a networkx graph for the files .
439
25
18,301
def writeGraph(grph, name, edgeInfo=True, typing=False, suffix='csv', overwrite=True, allSameAttribute=False):
    """Write both the edge list and node attribute list of _grph_ to files starting with _name_.

    When _typing_ is True a "_directed"/"_undirected" marker is appended to the
    file names. Raises OSError if _overwrite_ is False and a target file exists.
    """
    progArgs = (0, "Writing the graph to files starting with: {}".format(name))
    progKwargs = {'dummy': not metaknowledge.VERBOSE_MODE}
    with _ProgressBar(*progArgs, **progKwargs) as PBar:
        if typing:
            if isinstance(grph, (nx.classes.digraph.DiGraph, nx.classes.multidigraph.MultiDiGraph)):
                grphType = "_directed"
            else:
                grphType = "_undirected"
        else:
            grphType = ''
        dirPart, basePart = os.path.split(os.path.expanduser(os.path.normpath(name)))
        # A non-empty basename becomes a "base_" prefix; otherwise the default
        # names are used. The directory, when given, is re-joined afterwards.
        prefix = basePart + "_" if basePart else ""
        edgeListName = prefix + "edgeList" + grphType + '.' + suffix
        nodesAtrName = prefix + "nodeAttributes" + grphType + '.' + suffix
        if dirPart:
            edgeListName = os.path.join(dirPart, edgeListName)
            nodesAtrName = os.path.join(dirPart, nodesAtrName)
        if not overwrite:
            if os.path.isfile(edgeListName):
                raise OSError(edgeListName + " already exists")
            if os.path.isfile(nodesAtrName):
                raise OSError(nodesAtrName + " already exists")
        writeEdgeList(grph, edgeListName, extraInfo=edgeInfo, allSameAttribute=allSameAttribute, _progBar=PBar)
        writeNodeAttributeFile(grph, nodesAtrName, allSameAttribute=allSameAttribute, _progBar=PBar)
        PBar.finish("{} nodes and {} edges written to file".format(len(grph.nodes()), len(grph.edges())))
Writes both the edge list and the node attribute list of _grph_ to files starting with _name_ .
656
24
18,302
def getNodeDegrees(grph, weightString="weight", strictMode=False, returnType=int, edgeType='bi'):
    """Return a dict mapping each node of _grph_ to its (weighted) degree.

    The weight of each edge is read from the edge attribute named
    _weightString_ and converted with _returnType_; a falsy _weightString_
    counts every edge as 1. _edgeType_ selects which endpoint(s) accumulate:
    'bi' (both), 'in', or 'out'. With _strictMode_ a missing weight attribute
    raises KeyError instead of defaulting to 1.
    """
    degrees = {node: returnType(0) for node in grph.nodes()}
    for src, dst, attrs in grph.edges(data=True):
        if weightString:
            try:
                weight = returnType(attrs[weightString])
            except KeyError:
                if strictMode:
                    raise KeyError("The edge from " + str(src) + " to " + str(dst) + " does not have the attribute: '" + str(weightString) + "'")
                weight = returnType(1)
        else:
            weight = returnType(1)
        if edgeType == 'bi':
            degrees[src] += weight
            degrees[dst] += weight
        elif edgeType == 'in':
            degrees[dst] += weight
        elif edgeType == 'out':
            degrees[src] += weight
        else:
            raise ValueError("edgeType must be 'bi', 'in', or 'out'")
    return degrees
Returns a dictionary mapping nodes to their degrees . The degree is determined by adding the weights of the edges , where _weightString_ gives the name of the edge attribute containing the weight . The weights are then converted to the type _returnType_ . If _weightString_ is given as False , each edge is instead counted as 1 .
290
71
18,303
def mergeGraphs(targetGraph, addedGraph, incrementedNodeVal='count', incrementedEdgeVal='weight'):
    """Merge _addedGraph_ into _targetGraph_ in place.

    Nodes/edges already present have their _incrementedNodeVal_/
    _incrementedEdgeVal_ attributes incremented; new ones are copied over with
    their attributes. Passing a falsy value for either disables incrementing
    and only missing nodes/edges are added. Intended only for graphs produced
    by metaknowledge; no validation is performed.
    """
    for addedNode, attribs in addedGraph.nodes(data=True):
        if incrementedNodeVal:
            try:
                targetGraph.node[addedNode][incrementedNodeVal] += attribs[incrementedNodeVal]
            except KeyError:
                # Node (or its counter attribute) absent: copy it wholesale.
                targetGraph.add_node(addedNode, **attribs)
        else:
            if not targetGraph.has_node(addedNode):
                targetGraph.add_node(addedNode, **attribs)
    for edgeNode1, edgeNode2, attribs in addedGraph.edges(data=True):
        if incrementedEdgeVal:
            try:
                targetGraph.edges[edgeNode1, edgeNode2][incrementedEdgeVal] += attribs[incrementedEdgeVal]
            except KeyError:
                targetGraph.add_edge(edgeNode1, edgeNode2, **attribs)
        else:
            # BUG FIX: was `targetGraph.Graph.has_edge(...)` — graph instances
            # have no `Graph` attribute, so this branch always raised
            # AttributeError when incrementedEdgeVal was falsy.
            if not targetGraph.has_edge(edgeNode1, edgeNode2):
                targetGraph.add_edge(edgeNode1, edgeNode2, **attribs)
A quick way of merging graphs this is meant to be quick and is only intended for graphs generated by metaknowledge . This does not check anything and as such may cause unexpected results if the source and target were not generated by the same method .
259
50
18,304
def AD(val):
    """Parse Affiliation (AD) lines back into a dict.

    Each entry is "author : affil1;affil2;..."; the part before the first
    ' : ' becomes the key and the remainder is split on ';' with newlines and
    empty pieces dropped (some AD fields end with a trailing semicolon).
    """
    parsed = {}
    for entry in val:
        author, _, rest = entry.partition(' : ')
        parsed[author] = [part for part in rest.replace('\n', '').split(';') if part != '']
    return parsed
Affiliation . Undoes what the parser does , then splits at the semicolons and drops newlines ; extra filtering is required because some AD fields end with a semicolon .
80
37
18,305
def AUID(val):
    """Parse AuthorIdentifier (AUID) lines back into a dict.

    Each entry is one "author : identifier" line; the text before the first
    ' : ' keys the remainder.
    """
    identifiers = {}
    for entry in val:
        author, _, ident = entry.partition(' : ')
        identifiers[author] = ident
    return identifiers
AuthorIdentifier . One line only ; just needs to undo the parser's effects .
54
14
18,306
def isInteractive():
    """Basic check of whether the program is running interactively.

    True only when stdout is a tty on a non-Windows OS (hopefully everything
    but MS supports '\\r') and threading is importable.
    """
    if not (sys.stdout.isatty() and os.name != 'nt'):
        return False
    try:
        import threading  # noqa: F401 -- availability check only
    except ImportError:
        return False
    return True
A basic check of if the program is running in interactive mode
56
12
18,307
def getInstitutions(self, tags=None, seperator=";", _getTag=False):
    """Return a list of institution names for this record.

    Any key containing 'institution' (case-insensitive) is added to _tags_
    before delegating to the parent class's getInvestigators.
    """
    if tags is None:
        tags = []
    elif isinstance(tags, str):
        tags = [tags]
    for key in self.keys():
        if 'institution' in key.lower() and key not in tags:
            tags.append(key)
    return super().getInvestigators(tags=tags, seperator=seperator, _getTag=_getTag)
Returns a list with the names of the institution . The optional arguments are ignored
110
15
18,308
# Writes this PubMed record to the open file handle `f` in MEDLINE tag format;
# raises BadPubmedRecord if the record was malformed. Author-based tags are
# grouped under their author and emitted right after the matching 'AU' line.
# NOTE(review): left byte-identical — the continuation-indent string literals
# (e.g. '\n ') appear whitespace-collapsed by the dump, so a reformat could not
# guarantee the exact runtime strings; confirm against the original file.
def writeRecord ( self , f ) : if self . bad : raise BadPubmedRecord ( "This record cannot be converted to a file as the input was malformed.\nThe original line number (if any) is: {} and the original file is: '{}'" . format ( self . _sourceLine , self . _sourceFile ) ) else : authTags = { } for tag in authorBasedTags : for val in self . _fieldDict . get ( tag , [ ] ) : split = val . split ( ' : ' ) try : authTags [ split [ 0 ] ] . append ( "{0}{1}- {2}\n" . format ( tag , ' ' * ( 4 - len ( tag ) ) , ' : ' . join ( split [ 1 : ] ) . replace ( '\n' , '\n ' ) ) ) except KeyError : authTags [ split [ 0 ] ] = [ "{0}{1}- {2}\n" . format ( tag , ' ' * ( 4 - len ( tag ) ) , ' : ' . join ( split [ 1 : ] ) . replace ( '\n' , '\n ' ) ) ] for tag , value in self . _fieldDict . items ( ) : if tag in authorBasedTags : continue else : for v in value : f . write ( "{0}{1}- {2}\n" . format ( tag , ' ' * ( 4 - len ( tag ) ) , v . replace ( '\n' , '\n ' ) ) ) if tag == 'AU' : for authVal in authTags . get ( v , [ ] ) : f . write ( authVal )
This is nearly identical to the original ; the FAU tag is the only tag not written in the same place , since doing so would require changing the parser and lots of extra logic .
362
35
18,309
def quickVisual(G, showLabel=False):
    """Draw a simple matplotlib figure of _G_ with nodes coloured by their 'type' attribute.

    Labels are added when _showLabel_ is True. Raises RuntimeError when only
    some nodes carry a 'type' attribute.
    """
    colours = "brcmykwg"
    fig = plt.figure(1)
    ax = fig.add_subplot(1, 1, 1)
    seenTypes = []
    nodeColours = []
    layout = nx.spring_layout(G, k=4 / math.sqrt(len(G.nodes())))
    for node, data in G.nodes(data=True):
        if 'type' in data:
            if data['type'] not in seenTypes:
                seenTypes.append(data['type'])
            # Cycle through the palette if there are more types than colours.
            nodeColours.append(colours[seenTypes.index(data['type']) % len(colours)])
        elif len(nodeColours) > 1:
            raise RuntimeError("Some nodes do not have a type")
    if len(nodeColours) < 1:
        nx.draw_networkx_nodes(G, pos=layout, node_color=colours[0], node_shape='8', node_size=100, ax=ax)
    else:
        nx.draw_networkx_nodes(G, pos=layout, node_color=nodeColours, node_shape='8', node_size=100, ax=ax)
    nx.draw_networkx_edges(G, pos=layout, width=.7, ax=ax)
    if showLabel:
        nx.draw_networkx_labels(G, pos=layout, font_size=8, ax=ax)
    plt.axis('off')
    fig.set_facecolor('w')
Just makes a simple _matplotlib_ figure and displays it , with each node coloured by its type . You can add labels with _showLabel_ . This looks a bit nicer than the one provided by _networkx_'s defaults .
383
48
18,310
def graphDensityContourPlot(G, iters=50, layout=None, layoutScaleFactor=1, overlay=False, nodeSize=10, axisSamples=100, blurringFactor=.1, contours=15, graphType='coloured'):
    """Create a 3D plot of node density over a 2D layout of _G_.

    A layout is computed with spring_layout when _layout_ is None, otherwise a
    positions dict scaled by _layoutScaleFactor_ is expected. The node counts
    are binned onto an axisSamples x axisSamples grid, Gaussian-blurred, and
    rendered as a surface or filled contours depending on _graphType_.
    """
    from mpl_toolkits.mplot3d import Axes3D
    if not isinstance(G, nx.classes.digraph.DiGraph) and not isinstance(G, nx.classes.graph.Graph):
        raise TypeError("{} is not a valid input.".format(type(G)))
    if layout is None:
        layout = nx.spring_layout(G, scale=axisSamples - 1, iterations=iters)
        grid = np.zeros([axisSamples, axisSamples], dtype=np.float32)
        for pos in layout.values():
            x, y = tuple(int(c) for c in pos.round(0))
            grid[y][x] += 1
    elif isinstance(layout, dict):
        # Work on a copy: positions are rescaled in place below.
        layout = layout.copy()
        grid = np.zeros([axisSamples, axisSamples], dtype=np.float32)
        multFactor = (axisSamples - 1) / layoutScaleFactor
        for key in layout.keys():
            scaledPos = layout[key] * multFactor
            layout[key] = scaledPos
            x, y = tuple(int(c) for c in scaledPos.round(0))
            grid[y][x] += 1
    else:
        raise TypeError("{} is not a valid input.".format(type(layout)))
    fig = plt.figure()
    axis = fig.gca(projection='3d')
    if overlay:
        nx.draw_networkx(G, pos=layout, ax=axis, node_size=nodeSize, with_labels=False, edgelist=[])
    grid = ndi.gaussian_filter(grid, (blurringFactor * axisSamples, blurringFactor * axisSamples))
    X = Y = np.arange(0, axisSamples, 1)
    X, Y = np.meshgrid(X, Y)
    if graphType == "solid":
        CS = axis.plot_surface(X, Y, grid)
    else:
        CS = axis.contourf(X, Y, grid, contours)
    axis.set_xlabel('X')
    axis.set_ylabel('Y')
    axis.set_zlabel('Node Density')
Creates a 3D plot giving the density of nodes on a 2D plane as a surface in 3D .
573
23
18,311
def makeBiDirectional(d):
    """Helper for generating tagNameConverter.

    Return a new dict that maps every key to its value AND every value back to
    its key; _d_ itself is not modified.
    """
    twoWay = dict(d)
    twoWay.update({value: key for key, value in d.items()})
    return twoWay
Helper for generating tagNameConverter Makes dict that maps from key to value and back
39
18
18,312
def reverseDict(d):
    """Helper for generating fullToTag: return a dict mapping each value of _d_ to its key."""
    return {value: key for key, value in d.items()}
Helper for generating fullToTag Makes dict of value to key
31
12
18,313
def makeNodeTuple(citation, idVal, nodeInfo, fullInfo, nodeType, count, coreCitesDict, coreValues, detailedValues, addCR):
    """Build the (idVal, attributeDict) tuple for a citation node.

    The attribute dict's contents depend on _nodeType_ and on whether the
    citation resolves to a core Record via _coreCitesDict_; _coreValues_ /
    _detailedValues_ select how much core-Record detail is included.
    """
    attrs = {}
    if nodeInfo:
        if nodeType == 'full':
            if coreValues:
                if citation in coreCitesDict:
                    R = coreCitesDict[citation]
                    attrs['MK-ID'] = R.id
                    if not detailedValues:
                        # Compact single 'info' string; commas stripped so the
                        # value stays CSV-safe.
                        infoVals = []
                        for tag in coreValues:
                            tagVal = R.get(tag)
                            if isinstance(tagVal, str):
                                infoVals.append(tagVal.replace(',', ''))
                            elif isinstance(tagVal, list):
                                infoVals.append(tagVal[0].replace(',', ''))
                        attrs['info'] = ', '.join(infoVals)
                    else:
                        for tag in coreValues:
                            tagVal = R.get(tag, None)
                            if isinstance(tagVal, list):
                                attrs[tag] = '|'.join(sorted(tagVal))
                            else:
                                attrs[tag] = tagVal
                    attrs['inCore'] = True
                    if addCR:
                        attrs['citations'] = '|'.join(str(c) for c in R.get('citations', []))
                else:
                    attrs['MK-ID'] = 'None'
                    attrs['info'] = citation.allButDOI()
                    attrs['inCore'] = False
                    if addCR:
                        attrs['citations'] = ''
            else:
                attrs['info'] = citation.allButDOI()
        elif nodeType == 'journal':
            attrs['info'] = str(citation.FullJournalName()) if citation.isJournal() else "None"
        elif nodeType == 'original':
            attrs['info'] = str(citation)
        else:
            attrs['info'] = idVal
    if fullInfo:
        attrs['fullCite'] = str(citation)
    if count:
        attrs['count'] = 1
    return (idVal, attrs)
Makes a tuple of idVal and a dict of the selected attributes
472
14
18,314
def expandRecs(G, RecCollect, nodeType, weighted):
    """Expand all the citations from _RecCollect_ inside graph _G_.

    For each Record with multiple citation forms, any form already in _G_ has
    its siblings added (copying its attributes and edges) or, when both forms
    already exist and _weighted_ is set, the connecting edge's weight bumped.
    """
    for Rec in RecCollect:
        fullCiteList = [makeID(c, nodeType) for c in Rec.createCitation(multiCite=True)]
        if len(fullCiteList) > 1:
            for i, citeID1 in enumerate(fullCiteList):
                if citeID1 not in G:
                    continue
                for citeID2 in fullCiteList[i + 1:]:
                    if citeID2 not in G:
                        # Clone the known node and link the pair.
                        G.add_node(citeID2, **G.node[citeID1])
                        if weighted:
                            G.add_edge(citeID1, citeID2, weight=1)
                        else:
                            G.add_edge(citeID1, citeID2)
                    elif weighted:
                        try:
                            G.edges[citeID1, citeID2]['weight'] += 1
                        except KeyError:
                            G.add_edge(citeID1, citeID2, weight=1)
                    # Mirror every edge of citeID1 onto citeID2.
                    for _, neighbour, data in G.edges(citeID1, data=True):
                        G.add_edge(citeID2, neighbour, **data)
Expand all the citations from _RecCollect_
254
10
18,315
def dropNonJournals(self, ptVal='J', dropBad=True, invert=False):
    """Drop non-journal Records from the collection by comparing the PT tag against _ptVal_.

    With _invert_ the journal Records are dropped instead; _dropBad_ first
    removes malformed entries.
    """
    if dropBad:
        self.dropBadEntries()
    wanted = ptVal.upper()
    if invert:
        self._collection = {rec for rec in self._collection if rec['pubType'] != wanted}
    else:
        self._collection = {rec for rec in self._collection if rec['pubType'] == wanted}
Drops the non journal type Records from the collection this is done by checking _ptVal_ against the PT tag
104
23
18,316
def writeFile(self, fname=None):
    """Write the RecordCollection to a file in the same format as a WOS download.

    The output path defaults to the collection name plus '.txt'. Record order
    is random. Headers/footers are emitted per source type (WOS, Medline,
    Scopus).
    """
    # Single-type collections keep their native encoding; mixed ones use utf-8.
    if len(self._collectedTypes) < 2:
        recEncoding = self.peek().encoding()
    else:
        recEncoding = 'utf-8'
    target = fname if fname else self.name[:200] + '.txt'
    f = open(target, mode='w', encoding=recEncoding)
    if self._collectedTypes == {'WOSRecord'}:
        f.write("\ufeffFN Thomson Reuters Web of Science\u2122\n")
        f.write("VR 1.0\n")
    elif self._collectedTypes == {'MedlineRecord'}:
        f.write('\n')
    elif self._collectedTypes == {'ScopusRecord'}:
        f.write("\ufeff{}\n".format(','.join(scopusHeader)))
    for R in self._collection:
        R.writeRecord(f)
        f.write('\n')
    if self._collectedTypes == {'WOSRecord'}:
        f.write('EF')
    f.close()
Writes the RecordCollection to a file ; the written file's format is identical to those downloaded from WOS . The order of Records written is random .
273
30
18,317
def writeBib(self, fname=None, maxStringLength=1000, wosMode=False, reducedOutput=False, niceIDs=True):
    """Write a BibTeX entry to _fname_ for each Record in the collection.

    Defaults to '<collection name>.bib'. Malformed WOS Records are skipped;
    Records without BibTeX support raise RecordsNotCompatible.
    """
    target = fname if fname else self.name[:200] + '.bib'
    f = open(target, mode='w', encoding='utf-8')
    # Worth mentioning up front, as someone will get annoyed that none of the
    # special characters are escaped and how terrible some fields look.
    f.write("%This file was generated by the metaknowledge Python package.\n%The contents have been automatically generated and are likely to not work with\n%LaTeX without some human intervention. This file is meant for other automatic\n%systems and not to be used directly for making citations\n")
    for R in self:
        try:
            f.write('\n\n')
            f.write(R.bibString(maxLength=maxStringLength, WOSMode=wosMode, restrictedOutput=reducedOutput, niceID=niceIDs))
        except BadWOSRecord:
            pass
        except AttributeError:
            raise RecordsNotCompatible("The Record '{}', with ID '{}' does not support writing to bibtext files.".format(R, R.id))
    f.close()
Writes a bibTex entry to _fname_ for each Record in the collection .
305
19
18,318
def makeDict(self, onlyTheseTags=None, longNames=False, raw=False, numAuthors=True, genderCounts=True):
    """Return a dict of tag -> list of per-Record values, aligned across tags.

    Missing values appear as None. Optional extra columns give author counts
    and gender counts; _longNames_ converts tags to their long forms.
    """
    if onlyTheseTags:
        # Normalize any long names the caller passed to their short tags.
        for i, tag in enumerate(onlyTheseTags):
            if tag in fullToTagDict:
                onlyTheseTags[i] = fullToTagDict[tag]
        retrievedFields = onlyTheseTags
    else:
        retrievedFields = []
        for R in self:
            retrievedFields += [t for t in R.keys() if t not in retrievedFields]
    if longNames:
        try:
            retrievedFields = [tagToFullDict[t] for t in retrievedFields]
        except KeyError:
            raise KeyError("One of the tags could not be converted to a long name.")
    retDict = {field: [] for field in retrievedFields}
    if numAuthors:
        retDict["num-Authors"] = []
    if genderCounts:
        retDict.update({'num-Male': [], 'num-Female': [], 'num-Unknown': []})
    for R in self:
        if numAuthors:
            retDict["num-Authors"].append(len(R.get('authorsShort', [])))
        if genderCounts:
            male, female, unknown = R.authGenders(_countsTuple=True)
            retDict['num-Male'].append(male)
            retDict['num-Female'].append(female)
            retDict['num-Unknown'].append(unknown)
        for key, value in R.subDict(retrievedFields, raw=raw).items():
            retDict[key].append(value)
    return retDict
Returns a dict with each key a tag and the values being lists of the values for each of the Records in the collection None is given when there is no value and they are in the same order across each tag .
409
43
18,319
def getCitations(self, field=None, values=None, pandasFriendly=True, counts=True):
    """Collect citations from every Record, optionally filtered by _field_/_values_.

    Returns a pandas-ready dict when _pandasFriendly_, otherwise a de-duplicated list.
    """
    collected = []
    if values is not None:
        # Wrap scalars (and any non-container) in a list for uniform filtering.
        if isinstance(values, (str, int, float)) or not isinstance(values, collections.abc.Container):
            values = [values]
    for R in self:
        collected += R.getCitations(field=field, values=values, pandasFriendly=False)
    if pandasFriendly:
        return _pandasPrep(collected, counts)
    return list(set(collected))
Creates a pandas ready dict with each row a different citation the contained Records and columns containing the original string year journal author s name and the number of times it occured .
134
36
18,320
def networkCoCitation(self, dropAnon=True, nodeType="full", nodeInfo=True, fullInfo=False, weighted=True, dropNonJournals=False, count=True, keyWords=None, detailedCore=True, detailedCoreAttributes=False, coreOnly=False, expandedCore=False, addCR=False):
    """Create a co-citation network for the RecordCollection.

    Nodes are citations (granularity set by _nodeType_); edges link citations
    co-occurring in a Record's reference list. Core Records can carry extra
    attributes (_detailedCore_), be the only nodes (_coreOnly_), or be
    expanded across citation variants (_expandedCore_).
    """
    allowedTypes = ["full", "original", "author", "journal", "year"]
    if nodeType not in allowedTypes:
        raise RCValueError("{} is not an allowed nodeType.".format(nodeType))
    coreValues = []
    if bool(detailedCore):
        try:
            for tag in detailedCore:
                coreValues.append(normalizeToTag(tag))
        except TypeError:
            # detailedCore was truthy but not iterable: use the default tag set.
            coreValues = ['id', 'authorsFull', 'year', 'title', 'journal', 'volume', 'beginningPage']
    tmpgrph = nx.Graph()
    pcount = 0
    progArgs = (0, "Starting to make a co-citation network")
    progKwargs = {'dummy': not metaknowledge.VERBOSE_MODE}
    with _ProgressBar(*progArgs, **progKwargs) as PBar:
        if coreOnly or coreValues or expandedCore:
            coreCitesDict = {R.createCitation(): R for R in self}
            coreCites = coreCitesDict.keys() if coreOnly else None
        else:
            coreCitesDict = None
            coreCites = None
        for R in self:
            if PBar:
                pcount += 1
                PBar.updateVal(pcount / len(self), "Analyzing: {}".format(R))
            Cites = R.get('citations')
            if Cites:
                filteredCites = filterCites(Cites, nodeType, dropAnon, dropNonJournals, keyWords, coreCites)
                addToNetwork(tmpgrph, filteredCites, count, weighted, nodeType, nodeInfo, fullInfo, coreCitesDict, coreValues, detailedCoreAttributes, addCR, headNd=None)
        if expandedCore:
            if PBar:
                PBar.updateVal(.98, "Expanding core Records")
            expandRecs(tmpgrph, self, nodeType, weighted)
        if PBar:
            PBar.finish("Done making a co-citation network from {}".format(self))
    return tmpgrph
Creates a co - citation network for the RecordCollection .
566
12
18,321
def networkBibCoupling(self, weighted=True, fullInfo=False, addCR=False):
    """Create a bibliographic coupling network for the RecordCollection.

    Two core Records are linked when they cite a common work; with _weighted_
    the edge weight counts the shared citations.
    """
    progArgs = (0, "Make a citation network for coupling")
    progKwargs = {'dummy': not metaknowledge.VERBOSE_MODE}
    with _ProgressBar(*progArgs, **progKwargs) as PBar:
        citeGrph = self.networkCitation(weighted=False, directed=True, detailedCore=True, fullInfo=fullInfo, count=False, nodeInfo=True, addCR=addCR, _quiet=True)
        pcount = 0
        pmax = len(citeGrph)
        PBar.updateVal(.2, "Starting to classify nodes")
        workingGrph = nx.Graph()
        couplingSet = set()
        for node, data in citeGrph.nodes(data=True):
            pcount += 1
            PBar.updateVal(.2 + .4 * (pcount / pmax), "Classifying: {}".format(node))
            if data['inCore']:
                workingGrph.add_node(node, **data)
                if citeGrph.in_degree(node) > 0:
                    couplingSet.add(node)
        pcount = 0
        pmax = len(couplingSet)
        # NOTE(review): pcount is never incremented in this loop in the
        # original, so the progress stays at .6 — preserved as-is.
        for node in couplingSet:
            PBar.updateVal(.6 + .4 * (pcount / pmax), "Coupling: {}".format(node))
            citesLst = list(citeGrph.in_edges(node))
            for i, edgeOuter in enumerate(citesLst):
                outerNode = edgeOuter[0]
                for edgeInner in citesLst[i + 1:]:
                    innerNode = edgeInner[0]
                    if weighted and workingGrph.has_edge(outerNode, innerNode):
                        workingGrph.edges[outerNode, innerNode]['weight'] += 1
                    elif weighted:
                        workingGrph.add_edge(outerNode, innerNode, weight=1)
                    else:
                        workingGrph.add_edge(outerNode, innerNode)
        PBar.finish("Done making a bib-coupling network from {}".format(self))
    return workingGrph
Creates a bibliographic coupling network based on citations for the RecordCollection .
511
16
18,322
def yearSplit(self, startYear, endYear, dropMissingYears=True):
    """Return a new RecordCollection of Records published between _startYear_ and _endYear_ inclusive.

    Records with unparseable years are skipped unless _dropMissingYears_ is
    False, in which case the TypeError propagates.
    """
    recordsInRange = set()
    for R in self:
        try:
            year = R.get('year')
            if startYear <= year <= endYear:
                recordsInRange.add(R)
        except TypeError:
            if not dropMissingYears:
                raise
    subCollection = RecordCollection(recordsInRange, name="{}({}-{})".format(self.name, startYear, endYear), quietStart=True)
    subCollection._collectedTypes = self._collectedTypes.copy()
    return subCollection
Creates a RecordCollection of Records from the years between _startYear_ and _endYear_ inclusive .
139
22
18,323
def localCiteStats(self, pandasFriendly=False, keyType="citation"):
    """Return a dict of citation keys (citation, journal, year, or author) to their occurrence counts.

    With _pandasFriendly_ the result is split into parallel 'Citations' and
    'Counts' lists.
    """
    count = 0
    recCount = len(self)
    progArgs = (0, "Starting to get the local stats on {}s.".format(keyType))
    progKwargs = {'dummy': not metaknowledge.VERBOSE_MODE}
    with _ProgressBar(*progArgs, **progKwargs) as PBar:
        keyTypesLst = ["citation", "journal", "year", "author"]
        citesDict = {}
        if keyType not in keyTypesLst:
            raise TypeError("{} is not a valid key type, only '{}' or '{}' are.".format(keyType, "', '".join(keyTypesLst[:-1]), keyTypesLst[-1]))
        for R in self:
            rCites = R.get('citations')
            if PBar:
                count += 1
                PBar.updateVal(count / recCount, "Analysing: {}".format(R.UT))
            if not rCites:
                continue
            for cite in rCites:
                # Either the whole citation or one of its attributes keys the tally.
                cVal = cite if keyType == keyTypesLst[0] else getattr(cite, keyType)
                if cVal is None:
                    continue
                if cVal in citesDict:
                    citesDict[cVal] += 1
                else:
                    citesDict[cVal] = 1
        if PBar:
            PBar.finish("Done, {} {} fields analysed".format(len(citesDict), keyType))
    if pandasFriendly:
        citeLst = []
        countLst = []
        for cite, occ in citesDict.items():
            citeLst.append(cite)
            countLst.append(occ)
        return {"Citations": citeLst, "Counts": countLst}
    return citesDict
Returns a dict with all the citations in the CR field as keys and the number of times they occur as the values
447
23
18,324
def localCitesOf(self, rec):
    """Return a RecordCollection of all Records in this collection that cite _rec_.

    _rec_ may be a Record, a WOS/citation string, or a Citation.
    """
    localCites = []
    if isinstance(rec, Record):
        recCite = rec.createCitation()
    # NOTE(review): a Record input falls through to the chain below and hits
    # the final `raise` (it is neither str nor Citation); preserved as-is —
    # confirm against the upstream source before relying on Record inputs.
    if isinstance(rec, str):
        try:
            recCite = self.getID(rec)
        except ValueError:
            try:
                recCite = Citation(rec)
            except AttributeError:
                raise ValueError("{} is not a valid WOS string or a valid citation string".format(recCite))
        else:
            if recCite is None:
                return RecordCollection(inCollection=localCites, name="Records_citing_{}".format(rec), quietStart=True)
            else:
                recCite = recCite.createCitation()
    elif isinstance(rec, Citation):
        recCite = rec
    else:
        raise ValueError("{} is not a valid input, rec must be a Record, string or Citation object.".format(rec))
    for R in self:
        rCites = R.get('citations')
        if rCites:
            for cite in rCites:
                if recCite == cite:
                    localCites.append(R)
                    break
    return RecordCollection(inCollection=localCites, name="Records_citing_'{}'".format(rec), quietStart=True)
Takes in a Record WOS string citation string or Citation and returns a RecordCollection of all records that cite it .
288
24
18,325
def citeFilter(self, keyString='', field='all', reverse=False, caseSensitive=False):
    """Filter Records by _keyString_ occurring in the given citation _field_.

    Returns a RecordCollection of Records with at least one matching citation
    (or, with _reverse_, of those without one). Matching rules vary per field:
    most are case-insensitive substring tests, 'year' compares integers,
    'anonymous' and 'bad' test citation flags.
    """
    matched = []
    keyString = str(keyString)
    for R in self:
        try:
            if field == 'all':
                for cite in R.get('citations'):
                    if caseSensitive:
                        if keyString in cite.original:
                            matched.append(R)
                            break
                    else:
                        if keyString.upper() in cite.original.upper():
                            matched.append(R)
                            break
            elif field == 'author':
                for cite in R.get('citations'):
                    try:
                        if keyString.upper() in cite.author.upper():
                            matched.append(R)
                            break
                    except AttributeError:
                        pass
            elif field == 'journal':
                for cite in R.get('citations'):
                    try:
                        # Note: only keyString is upper-cased here (original behaviour).
                        if keyString.upper() in cite.journal:
                            matched.append(R)
                            break
                    except AttributeError:
                        pass
            elif field == 'year':
                for cite in R.get('citations'):
                    try:
                        if int(keyString) == cite.year:
                            matched.append(R)
                            break
                    except AttributeError:
                        pass
            elif field == 'V':
                for cite in R.get('citations'):
                    try:
                        if keyString.upper() in cite.V:
                            matched.append(R)
                            break
                    except AttributeError:
                        pass
            elif field == 'P':
                for cite in R.get('citations'):
                    try:
                        if keyString.upper() in cite.P:
                            matched.append(R)
                            break
                    except AttributeError:
                        pass
            elif field == 'misc':
                for cite in R.get('citations'):
                    try:
                        if keyString.upper() in cite.misc:
                            matched.append(R)
                            break
                    except AttributeError:
                        pass
            elif field == 'anonymous':
                for cite in R.get('citations'):
                    if cite.isAnonymous():
                        matched.append(R)
                        break
            elif field == 'bad':
                for cite in R.get('citations'):
                    if cite.bad:
                        matched.append(R)
                        break
        except TypeError:
            # Record has no citations (None is not iterable): skip it.
            pass
    if reverse:
        excluded = [R for R in self if R not in matched]
        return RecordCollection(inCollection=excluded, name=self.name, quietStart=True)
    return RecordCollection(inCollection=matched, name=self.name, quietStart=True)
Filters Records by some string _keyString_ in their citations and returns all Records with at least one citation possessing _keyString_ in the field given by _field_ .
570
36
18,326
def filterNonJournals(citesLst, invert=False):
    """Return the Citations in _citesLst_ that are journals (or, with _invert_, those that are not)."""
    return [cite for cite in citesLst if bool(cite.isJournal()) != bool(invert)]
Removes the Citations from _citesLst_ that are not journals
67
16
18,327
def add(self, elem):
    """Add _elem_ to the collection; raise CollectionTypeError for disallowed types."""
    if not isinstance(elem, self._allowedTypes):
        raise CollectionTypeError("{} can only contain '{}', '{}' is not allowed.".format(type(self).__name__, self._allowedTypes, elem))
    self._collection.add(elem)
    self._collectedTypes.add(type(elem).__name__)
Adds _elem_ to the collection .
98
9
18,328
def remove(self, elem):
    """Remove _elem_ from the collection; raise KeyError if it is missing."""
    try:
        return self._collection.remove(elem)
    except KeyError:
        # Re-raise with a friendlier message, suppressing the original context.
        raise KeyError("'{}' was not found in the {}: '{}'.".format(elem, type(self).__name__, self)) from None
Removes _elem_ from the collection ; will raise a KeyError if _elem_ is missing .
65
21
18,329
def clear(self):
    """Empty the collection and reset the error-handling state."""
    self._collection.clear()
    self.bad = False
    self.errors = {}
Removes all elements from the collection and resets the error handling
25
13
18,330
def pop(self):
    """Remove and return an arbitrary element; raise KeyError when the collection is empty."""
    try:
        return self._collection.pop()
    except KeyError:
        raise KeyError("Nothing left in the {}: '{}'.".format(type(self).__name__, self)) from None
Removes a random element from the collection and returns it
53
11
18,331
def copy(self):
    """Return a shallow copy of the collection.

    The copy gets its own `_collection`, `_collectedTypes`, `_allowedTypes`
    and `errors` containers so later mutations do not leak between the two
    objects.
    """
    collectedCopy = copy.copy(self)
    collectedCopy._collection = copy.copy(collectedCopy._collection)
    # BUG FIX: these two copies were previously assigned back onto `self`
    # (`self._collectedTypes = ...`), mutating the source object's attribute
    # identity instead of giving the copy independent containers.
    collectedCopy._collectedTypes = copy.copy(self._collectedTypes)
    collectedCopy._allowedTypes = copy.copy(self._allowedTypes)
    collectedCopy.errors = copy.copy(collectedCopy.errors)
    return collectedCopy
Creates a shallow copy of the collection
82
8
18,332
def chunk(self, maxSize):
    """Split the Collection into Collections of at most _maxSize_ elements.

    The source Collection is left untouched; returns the list of chunks.
    """
    chunks = []
    # Start "over-full" so the first element always opens a new chunk.
    currentSize = maxSize + 1
    for item in self:
        if currentSize >= maxSize:
            # BUG FIX: was `currentSize = 0`; the new chunk already holds one
            # element, so starting the count at 0 let chunks grow to
            # maxSize + 1 elements.
            currentSize = 1
            chunks.append(type(self)({item}, name='Chunk-{}-of-{}'.format(len(chunks), self.name), quietStart=True))
        else:
            chunks[-1].add(item)
            currentSize += 1
    return chunks
Splits the Collection into _maxSize_ size or smaller Collections
100
13
18,333
def split(self, maxSize):
    """Destructively split the Collection into Collections of at most _maxSize_ elements.

    The source Collection is emptied and renamed; returns the list of chunks.
    """
    chunks = []
    # Start "over-full" so the first element always opens a new chunk.
    currentSize = maxSize + 1
    try:
        while True:
            if currentSize >= maxSize:
                # BUG FIX: was `currentSize = 0`; the new chunk already holds
                # one element, so chunks could reach maxSize + 1 elements.
                currentSize = 1
                chunks.append(type(self)({self.pop()}, name='Chunk-{}-of-{}'.format(len(chunks), self.name), quietStart=True))
            else:
                chunks[-1].add(self.pop())
                currentSize += 1
    except KeyError:
        # pop() raised: the source is exhausted.
        self.clear()
        self.name = 'Emptied-{}'.format(self.name)
    return chunks
Destructively splits the Collection into _maxSize_ size or smaller Collections . The source Collection will be empty after this operation
136
25
18,334
def containsID(self, idVal):
    """Return True when some collected item has id equal to _idVal_."""
    return any(item.id == idVal for item in self)
Checks if the collected items contain the given _idVal_ .
27
13
18,335
def discardID(self, idVal):
    """Discard the first collected item whose id equals _idVal_; silently do nothing if absent."""
    for item in self:
        if item.id == idVal:
            self._collection.discard(item)
            return
Checks if the collected items contain the given _idVal_ and discards it if it is found ; will not raise an exception if the item is not found .
33
31
18,336
def removeID(self, idVal):
    """Remove the collected item whose id equals _idVal_; raise KeyError if none is found."""
    for item in self:
        if item.id == idVal:
            self._collection.remove(item)
            return
    raise KeyError("A Record with the ID '{}' was not found in the RecordCollection: '{}'.".format(idVal, self))
Checks if the collected items contain the given _idVal_ and removes it if it is found ; will raise a KeyError if the item is not found .
68
30
18,337
def badEntries(self):
    """Return a new collection of the same type holding only the bad entries."""
    flagged = {entry for entry in self if entry.bad}
    return type(self)(flagged, quietStart=True)
Creates a new collection of the same type with only the bad entries
48
14
18,338
def dropBadEntries(self):
    """Remove every bad entry from the collection and reset the error state."""
    self._collection = {entry for entry in self if not entry.bad}
    self.bad = False
    self.errors = {}
Removes all the bad entries from the collection
40
9
18,339
def tags(self):
    """Return the set of all field tags found across the contained items."""
    allTags = set()
    for item in self:
        allTags.update(item.keys())
    return allTags
Creates a list of all the tags of the contained items
29
12
18,340
def rankedSeries(self, tag, outputFile=None, giveCounts=True, giveRanks=False, greatestFirst=True, pandasMode=True, limitTo=None):
    """Create a pandas-ready dict of all values of *tag* ranked by their
    number of occurrences.

    Depending on the flags the result is a pandas-style dict of columns
    ('entry', 'count', 'rank'), a list of (value, count) or (value, rank)
    tuples, or a bare list of values; it can also be written to
    *outputFile* as CSV. Ties share a rank.
    """
    if giveRanks and giveCounts:
        raise mkException("rankedSeries cannot return counts and ranks only one of giveRanks or giveCounts can be True.")
    seriesDict = {}
    for R in self:
        # This should be faster than using get, since get is a wrapper for __getitem__
        try:
            val = R[tag]
        except KeyError:
            continue
        if not isinstance(val, list):
            val = [val]
        for entry in val:
            if limitTo and entry not in limitTo:
                continue
            if entry in seriesDict:
                seriesDict[entry] += 1
            else:
                seriesDict[entry] = 1
    seriesList = sorted(seriesDict.items(), key=lambda x: x[1], reverse=greatestFirst)
    if outputFile is not None:
        with open(outputFile, 'w') as f:
            writer = csv.writer(f, dialect='excel')
            writer.writerow((str(tag), 'count'))
            writer.writerows(seriesList)
    if giveCounts and not pandasMode:
        return seriesList
    elif giveRanks or pandasMode:
        # Ranking walks from the highest count downward
        if not greatestFirst:
            seriesList.reverse()
        currentRank = 1
        retList = []
        panDict = {'entry': [], 'count': [], 'rank': []}
        try:
            currentCount = seriesList[0][1]
        except IndexError:
            # Empty series so no need to loop
            pass
        else:
            for valString, count in seriesList:
                if currentCount > count:
                    currentRank += 1
                    currentCount = count
                if pandasMode:
                    panDict['entry'].append(valString)
                    panDict['count'].append(count)
                    panDict['rank'].append(currentRank)
                else:
                    retList.append((valString, currentRank))
        if not greatestFirst:
            retList.reverse()
        if pandasMode:
            return panDict
        else:
            return retList
    else:
        return [e for e, c in seriesList]
Creates a pandas-ready dict of the ordered list of all the values of _tag_, ranked by their number of occurrences. A list can also be returned with the counts or ranks added, or it can be written to a file.
484
49
18,341
def timeSeries(self, tag=None, outputFile=None, giveYears=True, greatestFirst=True, limitTo=False, pandasMode=True):
    """Create a pandas-ready dict of the values of *tag* paired with the
    years they occurred in; a value occurring in several years yields one
    entry per year.

    Can instead return a list of (entry, year) tuples, or bare entries,
    and can write the per-entry year lists to *outputFile* as CSV.
    """
    seriesDict = {}
    for R in self:
        # This should be faster than using get, since get is a wrapper for __getitem__
        try:
            year = R['year']
        except KeyError:
            continue
        if tag is None:
            # No tag given: each record itself is the series entry
            seriesDict[R] = {year: 1}
        else:
            try:
                val = R[tag]
            except KeyError:
                continue
            if not isinstance(val, list):
                val = [val]
            for entry in val:
                if limitTo and entry not in limitTo:
                    continue
                if entry in seriesDict:
                    try:
                        seriesDict[entry][year] += 1
                    except KeyError:
                        seriesDict[entry][year] = 1
                else:
                    seriesDict[entry] = {year: 1}
    seriesList = []
    for e, yd in seriesDict.items():
        seriesList += [(e, y) for y in yd.keys()]
    seriesList = sorted(seriesList, key=lambda x: x[1], reverse=greatestFirst)
    if outputFile is not None:
        with open(outputFile, 'w') as f:
            writer = csv.writer(f, dialect='excel')
            writer.writerow((str(tag), 'years'))
            writer.writerows(((k, '|'.join((str(y) for y in v))) for k, v in seriesDict.items()))
    if pandasMode:
        panDict = {'entry': [], 'count': [], 'year': []}
        for entry, year in seriesList:
            panDict['entry'].append(entry)
            panDict['year'].append(year)
            panDict['count'].append(seriesDict[entry][year])
        return panDict
    elif giveYears:
        return seriesList
    else:
        return [e for e, c in seriesList]
Creates a pandas-ready dict of the ordered list of all the values of _tag_, ranked by the year they occurred in; multiple year occurrences will create multiple entries. A list can also be returned with the counts or years added, or it can be written to a file.
459
57
18,342
def cooccurrenceCounts(self, keyTag, *countedTags):
    """Count how often values from any of *countedTags* co-occur with
    values of *keyTag*.

    Returns a dict mapping each value of *keyTag* to a dict mapping each
    co-occurring value to its count.

    Raises TagError when *keyTag* or any counted tag is not a string, or
    when no counted tags are given.
    """
    if not isinstance(keyTag, str):
        raise TagError("'{}' is not a string it cannot be used as a tag.".format(keyTag))
    if len(countedTags) < 1:
        # BUG FIX: this exception used to be constructed but never raised
        raise TagError("You need to provide atleast one tag")
    for tag in countedTags:
        if not isinstance(tag, str):
            raise TagError("'{}' is not a string it cannot be used as a tag.".format(tag))
    occurenceDict = {}
    progArgs = (0, "Starting to count the co-occurrences of '{}' and' {}'".format(keyTag, "','".join(countedTags)))
    if metaknowledge.VERBOSE_MODE:
        progKwargs = {'dummy': False}
    else:
        progKwargs = {'dummy': True}
    with _ProgressBar(*progArgs, **progKwargs) as PBar:
        for i, R in enumerate(self):
            PBar.updateVal(i / len(self), "Analyzing {}".format(R))
            keyVal = R.get(keyTag)
            if keyVal is None:
                continue
            if not isinstance(keyVal, list):
                keyVal = [keyVal]
            # Make sure every key value has a counts dict, even if empty
            for key in keyVal:
                if key not in occurenceDict:
                    occurenceDict[key] = {}
            for tag in countedTags:
                tagval = R.get(tag)
                if tagval is None:
                    continue
                if not isinstance(tagval, list):
                    tagval = [tagval]
                for val in tagval:
                    for key in keyVal:
                        try:
                            occurenceDict[key][val] += 1
                        except KeyError:
                            occurenceDict[key][val] = 1
        PBar.finish("Done extracting the co-occurrences of '{}' and '{}'".format(keyTag, "','".join(countedTags)))
    return occurenceDict
Counts the number of times values from any of the _countedTags_ occur with _keyTag_. The counts are returned as a dictionary with the values of _keyTag_ mapping to dictionaries with each of the _countedTags_ values mapping to their counts.
459
59
18,343
def makeNodeID(Rec, ndType, extras=None):
    """Helper to build a node ID (and extra attributes) from a record.

    When *ndType* is 'raw' the record itself is the ID; otherwise the
    record's value for *ndType* is used, with lists converted to tuples
    so the ID is hashable. Each tag in *extras* is stored in the returned
    attribute dict under its own name ('raw' stores the record itself).
    """
    if ndType == 'raw':
        recID = Rec
    else:
        recID = Rec.get(ndType)
    if recID is None:
        pass
    elif isinstance(recID, list):
        # Lists are unhashable; a tuple can serve as a node ID
        recID = tuple(recID)
    extraDict = {}
    if extras:
        for tag in extras:
            # BUG FIX: every extra used to be written under the single key
            # 'Tag', so only the last one survived; key on the tag name
            if tag == "raw":
                extraDict[tag] = Rec
            else:
                extraDict[tag] = Rec.get(tag)
    return recID, extraDict
Helper to make a node ID extras is currently not used
130
11
18,344
def pandoc_process(app, what, name, obj, options, lines):
    """Sphinx autodoc hook: convert Markdown docstrings to
    reStructuredText using pandoc.

    Mutates *lines* in place, as the autodoc API requires.
    """
    if not lines:
        return None
    source_format = app.config.mkdsupport_use_parser
    # Since default encoding for sphinx.ext.autodoc is unicode and pypandoc.convert_text, which will always return a
    # unicode string, expects unicode or utf-8 encodes string, there is on need for dealing with coding
    converted = pypandoc.convert_text(SEP.join(lines), 'rst', format=source_format)
    # The 'lines' in Sphinx is a list of strings and the value should be changed
    del lines[:]
    lines.extend(converted.split(SEP))
Convert docstrings in Markdown into reStructureText using pandoc
174
15
18,345
def beginningPage(R):
    """Best-effort extraction of the first page number from a record's
    'PG' field (pages may not be given as plain numbers)."""
    pages = R['PG']
    if pages.startswith('suppl '):
        pages = pages[6:]
    firstChunk = pages.split(' ')[0]
    return firstChunk.split('-')[0].replace(';', '')
As pages may not be given as numbers this is the most accurate this function can be
64
17
18,346
def copy(self):
    """Return a copy of the Record whose field dict is independent of
    the original (shallow copy plus a copied _fieldDict)."""
    c = copy.copy(self)
    # Copy the dict too so mutating one Record's fields cannot affect the other
    c._fieldDict = c._fieldDict.copy()
    return c
Correctly copies the Record
33
5
18,347
def values(self, raw=False):
    """Like dict.values, but with *raw* to expose the unprocessed field
    dict's values directly."""
    if raw:
        return self._fieldDict.values()
    return collections.abc.Mapping.values(self)
Like values for dicts but with a raw option
39
10
18,348
def items(self, raw=False):
    """Like dict.items, but with *raw* to expose the unprocessed field
    dict's items directly."""
    if raw:
        return self._fieldDict.items()
    return collections.abc.Mapping.items(self)
Like items for dicts but with a raw option
39
10
18,349
def getCitations(self, field=None, values=None, pandasFriendly=True):
    """Collect this record's citations, optionally filtered so that the
    citation's *field* attribute is among *values*.

    Returns a pandas-ready dict when *pandasFriendly* is True, otherwise
    the list of Citation objects.
    """
    if values is not None and (isinstance(values, (str, int, float)) or not isinstance(values, collections.abc.Container)):
        # A lone scalar is treated as a one-element filter list
        values = [values]
    if field is None:
        matched = self.get('citations', [])
    else:
        matched = []
        for cite in self.get('citations', []):
            try:
                fieldVal = getattr(cite, field)
            except AttributeError:
                # Citations lacking the attribute are skipped
                continue
            if values is None or fieldVal in values:
                matched.append(cite)
    if pandasFriendly:
        return _pandasPrep(matched, False)
    return matched
Creates a pandas ready dict with each row a different citation and columns containing the original string year journal and author s name .
168
26
18,350
def subDict(self, tags, raw=False):
    """Return {tag: value} for every tag in *tags*, with None for tags
    the Record lacks."""
    return {tag: self.get(tag, raw=raw) for tag in tags}
Creates a dict of values of _tags_ from the Record . The tags are the keys and the values are the values . If the tag is missing the value will be None .
46
37
18,351
def authGenders(self, countsOnly=False, fractionsMode=False, _countsTuple=False):
    """Map the record's authors to their detected genders.

    By default returns the {author: gender} dict from recordGenders;
    with *countsOnly* or *fractionsMode* returns Male/Female/Unknown
    counts (or fractions of the total), and *_countsTuple* returns them
    as a bare tuple instead of a dict.
    """
    authDict = recordGenders(self)
    if _countsTuple or countsOnly or fractionsMode:
        rawList = list(authDict.values())
        countsList = []
        for k in ('Male', 'Female', 'Unknown'):
            countsList.append(rawList.count(k))
        if fractionsMode:
            # Rotate each count out and push back its fraction, preserving order
            # NOTE(review): divides by the total count — raises ZeroDivisionError
            # when the record has no detected authors; confirm that is intended
            tot = sum(countsList)
            for i in range(3):
                countsList.append(countsList.pop(0) / tot)
        if _countsTuple:
            return tuple(countsList)
        else:
            return {'Male': countsList[0], 'Female': countsList[1], 'Unknown': countsList[2]}
    else:
        return authDict
Creates a dict mapping Male Female and Unknown to lists of the names of all the authors .
184
19
18,352
def proQuestParser(proFile):
    """Parse the ProQuest file *proFile* into individual records.

    Returns (records, error) where *records* is a set of ProQuestRecord
    and *error* is either None or the BadProQuestFile describing the
    first problem encountered.
    """
    # assumes the file is ProQuest
    nameDict = {}
    recSet = set()
    error = None
    lineNum = 0
    try:
        with open(proFile, 'r', encoding='utf-8') as openfile:
            f = enumerate(openfile, start=1)
            # Skip the fixed-size preamble
            for i in range(12):
                lineNum, line = next(f)
            # f is file so it *should* end, or at least cause a parser error eventually
            while True:
                lineNum, line = next(f)
                lineNum, line = next(f)
                if line == 'Bibliography\n':
                    for i in range(3):
                        lineNum, line = next(f)
                    break
                else:
                    # Table-of-contents entry: "<number>. <title>"
                    s = line.split('. ')
                    nameDict[int(s[0])] = '. '.join(s[1:])[:-1]
            while True:
                # import pdb; pdb.set_trace()
                lineNum, line = next(f)
                if line == 'Bibliography\n':
                    break
                elif line.startswith('Document '):
                    n = int(line[9:].split(' of ')[0])
                    R = ProQuestRecord(f, sFile=proFile, sLine=lineNum)
                    # Cross-check the record title against the table of contents
                    if R.get('Title') != nameDict[n]:
                        error = BadProQuestFile("The numbering of the titles at the beginning of the file does not match the records inside. Line {} has a record titled '{}' with number {}, the name should be '{}'.".format(lineNum, R.get('Title', "TITLE MISSING"), n, nameDict[n]))
                        raise StopIteration
                    recSet.add(R)
                    lineNum, line = next(f)
                else:
                    # Parsing failed
                    error = BadProQuestFile("The file '{}' has parts of it that are unparsable starting at line: {}. It is likely that the seperators between the records are incorrect".format(proFile, lineNum))
                    raise StopIteration
    except (UnicodeDecodeError, StopIteration, ValueError) as e:
        # StopIteration doubles as the bail-out signal from the checks above
        if error is None:
            error = BadProQuestFile("The file '{}' has parts of it that are unparsable starting at line: {}.\nThe error was: '{}'".format(proFile, lineNum, e))
    return recSet, error
Parses a ProQuest file _proFile_ to extract the individual entries .
544
17
18,353
def getInvestigators(self, tags=None, seperator=";", _getTag=False):
    """Return the names of the grant's investigators.

    The 'Investigator' tag is always searched; any caller-supplied
    *tags* are searched as well. The optional arguments are otherwise
    ignored.
    """
    if tags is None:
        tags = ['Investigator']
    elif isinstance(tags, str):
        tags = ['Investigator', tags]
    else:
        # BUG FIX: build a new list instead of appending, so the caller's
        # list argument is not mutated as a side effect
        tags = list(tags) + ['Investigator']
    return super().getInvestigators(tags=tags, seperator=seperator, _getTag=_getTag)
Returns a list of the names of investigators . The optional arguments are ignored .
96
15
18,354
def nameStringGender(s, noExcept=False):
    """Guess the gender of a name string given in "last, first" form.

    Returns 'Male', 'Female' or 'Unknown'. When the string has no
    "last, first" structure, raises GenderException unless *noExcept* is
    True (then 'Unknown' is returned instead).
    """
    global mappingDict
    try:
        first = s.split(', ')[1].split(' ')[0].title()
    except IndexError:
        if noExcept:
            return 'Unknown'
        else:
            # BUG FIX: the exception used to be returned instead of raised
            raise GenderException("The given String: '{}' does not have a last name, first name pair in with a ', ' seperation.".format(s))
    if mappingDict is None:
        # Lazily load the first-name -> gender mapping on first use
        mappingDict = getMapping()
    return mappingDict.get(first, 'Unknown')
Expects a name string in "last, first" format
125
4
18,355
def j9urlGenerator(nameDict=False):
    """Build the URLs of all the WOS Journal Title Abbreviations pages.

    The pages differ only by a leading character ('0-9' plus A-Z).
    Returns a dict keyed by that character when *nameDict* is True,
    otherwise a plain list. These are the currently-used URLs; they may
    change.
    """
    base = "https://images.webofknowledge.com/images/help/WOS/"
    tail = "_abrvjt.html"
    keys = ["0-9"] + list(string.ascii_uppercase)
    if nameDict:
        return {k: base + k + tail for k in keys}
    return [base + k + tail for k in keys]
How to get all the urls for the WOS Journal Title Abbreviations . Each is varies by only a few characters . These are the currently in use urls they may change .
137
39
18,356
def _j9SaveCurrent(sDir='.'):
    """Download every WOS abbreviation page into a dated directory under
    *sDir* (created if missing).

    Note: the process chdirs into that directory as a side effect.
    """
    dname = os.path.normpath(sDir + '/' + datetime.datetime.now().strftime("%Y-%m-%d_J9_AbbreviationDocs"))
    if not os.path.isdir(dname):
        os.mkdir(dname)
    os.chdir(dname)
    for urlID, urlString in j9urlGenerator(nameDict=True).items():
        fname = "{}_abrvjt.html".format(urlID)
        # BUG FIX: files were opened in a loop and never closed; the
        # context manager guarantees each handle is released
        with open(fname, 'wb') as f:
            f.write(urllib.request.urlopen(urlString).read())
Downloads and saves all the webpages
178
8
18,357
def _getDict(j9Page):
    """Parse one WOS Journal Title Abbreviations page into a dict mapping
    each abbreviation to the list of journal names that use it.

    Relies on the exact line layout of the page: after the '<DL>' marker,
    name lines and abbreviation lines strictly alternate.
    """
    slines = j9Page.read().decode('utf-8').split('\n')
    # Drop everything before the definition list
    while slines.pop(0) != "<DL>":
        pass
    currentName = slines.pop(0).split('"></A><DT>')[1]
    currentTag = slines.pop(0).split("<B><DD>\t")[1]
    j9Dict = {}
    while True:
        try:
            j9Dict[currentTag].append(currentName)
        except KeyError:
            j9Dict[currentTag] = [currentName]
        try:
            currentName = slines.pop(0).split('</B><DT>')[1]
            currentTag = slines.pop(0).split("<B><DD>\t")[1]
        except IndexError:
            # Ran past the last entry; parsing is done
            break
    return j9Dict
Parses a Journal Title Abbreviations page
205
11
18,358
def _getCurrentj9Dict():
    """Download and parse every WOS abbreviation page into one dict.

    Raises RuntimeError when a page parses to nothing, which usually
    means the parser needs updating.
    """
    j9Dict = {}
    for url in j9urlGenerator():
        pageDict = _getDict(urllib.request.urlopen(url))
        if len(pageDict) == 0:
            raise RuntimeError("Parsing failed, this is could require an update of the parser.")
        j9Dict.update(pageDict)
    return j9Dict
Downloads and parses all the webpages
96
9
18,359
def updatej9DB(dbname=abrevDBname, saveRawHTML=False):
    """Update the local database of WOS Journal Title Abbreviations.

    Requires an internet connection. The database is saved relative to
    this source file, not the working directory. With *saveRawHTML* the
    downloaded pages are also kept in a 'j9Raws' subdirectory.
    """
    if saveRawHTML:
        rawDir = '{}/j9Raws'.format(os.path.dirname(__file__))
        if not os.path.isdir(rawDir):
            os.mkdir(rawDir)
        _j9SaveCurrent(sDir=rawDir)
    dbLoc = os.path.join(os.path.normpath(os.path.dirname(__file__)), dbname)
    try:
        with dbm.dumb.open(dbLoc, flag='c') as db:
            try:
                j9Dict = _getCurrentj9Dict()
            except urllib.error.URLError:
                raise urllib.error.URLError("Unable to access server, check your connection")
            for k, v in j9Dict.items():
                if k in db:
                    # NOTE(review): this appends '|' + name onto j9Dict[k], which
                    # is a *list* (character-by-character), and tests membership
                    # against j9Dict[k] rather than the stored db entry — it looks
                    # like it should merge new names into db[k]; confirm intent
                    for jName in v:
                        if jName not in j9Dict[k]:
                            j9Dict[k] += '|' + jName
                else:
                    db[k] = '|'.join(v)
    except dbm.dumb.error as e:
        raise JournalDataBaseError("Something happened with the database of WOS journal names. To fix this you should delete the 1 to 3 files whose names start with {}. If this doesn't work (sorry), deleteing everything in '{}' and reinstalling metaknowledge should.\nThe error was '{}'".format(dbLoc, os.path.dirname(__file__), e))
Updates the database of Journal Title Abbreviations . Requires an internet connection . The data base is saved relative to the source file not the working directory .
360
32
18,360
def getj9dict(dbname=abrevDBname, manualDB=manualDBname, returnDict='both'):
    """Return the dict of journal abbreviations mapping to lists of the
    associated journal names.

    Reads the WOS database, the manual database, or both depending on
    *returnDict*. A missing or empty database triggers a rebuild via
    updatej9DB followed by a recursive retry. The databases live in the
    same directory as this source file.
    """
    dbLoc = os.path.normpath(os.path.dirname(__file__))
    retDict = {}
    try:
        if returnDict == 'both' or returnDict == 'WOS':
            with dbm.dumb.open(dbLoc + '/{}'.format(dbname)) as db:
                if len(db) == 0:
                    raise JournalDataBaseError("J9 Database empty or missing, to regenerate it import and run metaknowledge.WOS.journalAbbreviations.updatej9DB().")
                for k, v in db.items():
                    # dbm stores bytes; decode and split the '|'-joined names
                    retDict[k.decode('utf-8')] = v.decode('utf-8').split('|')
    except JournalDataBaseError:
        updatej9DB()
        return getj9dict(dbname=dbname, manualDB=manualDB, returnDict=returnDict)
    try:
        if returnDict == 'both' or returnDict == 'manual':
            if os.path.isfile(dbLoc + '/{}.dat'.format(manualDB)):
                with dbm.dumb.open(dbLoc + '/{}'.format(manualDB)) as db:
                    for k, v in db.items():
                        retDict[k.decode('utf-8')] = v.decode('utf-8').split('|')
            else:
                # A missing manual DB is only fatal when it was explicitly requested
                if returnDict == 'manual':
                    raise JournalDataBaseError("Manual J9 Database ({0}) missing, to create it run addToDB(dbname = {0})".format(manualDB))
    except JournalDataBaseError:
        updatej9DB(dbname=manualDB)
        return getj9dict(dbname=dbname, manualDB=manualDB, returnDict=returnDict)
    return retDict
Returns the dictionary of journal abbreviations mapping to a list of the associated journal names . By default the local database is used . The database is in the file _dbname_ in the same directory as this source file
445
43
18,361
def normalizeToTag(val):
    """Convert a tag or full field name to its 2-character tag
    (case insensitive).

    Raises KeyError for values that are neither tags nor names.
    """
    try:
        upperVal = val.upper()
    except AttributeError:
        raise KeyError("{} is not a tag or name string".format(val))
    if upperVal not in tagsAndNameSetUpper:
        raise KeyError("{} is not a tag or name string".format(upperVal))
    return fullToTagDictUpper.get(upperVal, upperVal)
Converts tags or full names to 2 character tags case insensitive
95
12
18,362
def normalizeToName(val):
    """Convert a tag or full field name to the full name
    (case sensitive).

    Raises KeyError for values that are neither tags nor names.
    """
    if val not in tagsAndNameSet:
        raise KeyError("{} is not a tag or name string".format(val))
    return tagToFullDict.get(val, val)
Converts tags or full names to full names case sensitive
57
11
18,363
def update(self, other):
    """Merge all tag-entry pairs from *other* into this Grant; on
    conflicts *other* takes precedence. Bad status and errors propagate
    as well. Returns NotImplemented for mismatched types."""
    if type(self) != type(other):
        return NotImplemented
    if other.bad:
        self.error = other.error
        self.bad = True
    self._fieldDict.update(other._fieldDict)
Adds all the tag - entry pairs from _other_ to the Grant . If there is a conflict _other_ takes precedence .
59
26
18,364
def relay_events_from(self, originator, event_type, *more_event_types):
    """Configure this handler to re-dispatch the named events from
    *originator* through this handler's own dispatcher."""
    def make_relay(name):
        # BUG FIX: the original built the lambdas inside a dict
        # comprehension, so every relay closed over the same loop variable
        # and dispatched only the LAST event type (late binding). Binding
        # the name via this factory captures each type correctly.
        return lambda *args, **kwargs: self.dispatch_event(name, *args, **kwargs)
    handlers = {name: make_relay(name) for name in (event_type,) + more_event_types}
    originator.set_handlers(**handlers)
Configure this handler to re - dispatch events from another handler .
90
13
18,365
def start_event(self, event_type, *args, dt=1/60):
    """Begin dispatching *event_type* (with *args*) every *dt* seconds.

    Does nothing when no handler is currently registered for the event.
    The repeating dispatch can later be cancelled with stop_event.
    """
    # Don't bother scheduling a timer if nobody's listening. This isn't
    # great from a general-purpose perspective, because a long-lived event
    # could have listeners attach and detach in the middle. But I don't
    # like the idea of making a bunch of clocks to spit out a bunch of
    # events that are never used, although to be fair I don't actually know
    # how expensive that would be. If I want to make this implementation
    # more general purpose, I could start and stop timers as necessary in
    # the methods that add or remove handlers.
    if not any(self.__yield_handlers(event_type)):
        return

    def on_time_interval(dt):
        # BUG FIX: this dispatch call was commented out, which made the
        # scheduled timer a no-op — the event was never actually fired
        self.dispatch_event(event_type, *args, dt)

    pyglet.clock.schedule_interval(on_time_interval, dt)
    self.__timers[event_type] = on_time_interval
Begin dispatching the given event at the given frequency .
223
11
18,366
def stop_event(self, event_type):
    """Cancel the repeating dispatch of *event_type*, if one is active."""
    if event_type not in self.__timers:
        return
    pyglet.clock.unschedule(self.__timers[event_type])
Stop dispatching the given event .
44
7
18,367
def __yield_handlers(self, event_type):
    """Yield every handler registered for *event_type*: handlers on the
    event stack first, then an instance attribute of the same name.

    Raises ValueError for event types this dispatcher does not declare.
    """
    if event_type not in self.event_types:
        raise ValueError("%r not found in %r.event_types == %r" % (event_type, self, self.event_types))
    # Search handler stack for matching event handlers
    for frame in list(self._event_stack):
        if event_type in frame:
            yield frame[event_type]
    # Check instance for an event handler
    if hasattr(self, event_type):
        yield getattr(self, event_type)
Yield all the handlers registered for the given event type .
124
12
18,368
def _filter_pending_updates(self):
    """Yield the updates to apply from those queued while a hold was
    active: duplicates are coalesced so that only the last occurrence of
    each update survives, in order. Subclasses may override this to
    customize how held updates are applied.
    """
    from more_itertools import unique_everseen as unique
    # Keep the *last* occurrence of each duplicate: reverse, keep first
    # occurrences, then reverse back
    yield from reversed(list(unique(reversed(self._pending_updates))))
Return all the updates that need to be applied from a list of all the updates that were called while the hold was active . This method is meant to be overridden by subclasses that want to customize how held updates are applied .
48
46
18,369
def get_html(self):
    """Render the suggested-repository list as a search-results HTML
    page and return it as a string."""
    res_dir = path.join(path.abspath(path.dirname(__file__)), "res/")
    template = Environment(loader=FileSystemLoader(res_dir)).get_template("suggest.htm.j2")
    return template.render(
        logo=path.join(res_dir, "logo.png"),
        user_login=self.user,
        repos=self.repos,
    )
Method to convert the repository list to a search results page .
104
12
18,370
def to_html(self, write_to):
    """Render the suggestion page and write it, UTF-8 encoded, to the
    file *write_to*."""
    rendered = self.get_html()
    with open(write_to, "wb") as out:
        out.write(rendered.encode("utf-8"))
Method to convert the repository list to a search results page and write it to a HTML file .
56
19
18,371
def get_unique_repositories(repo_list):
    """Return *repo_list* with duplicate repositories (same full_name)
    removed, keeping first occurrences in order."""
    seen = set()
    unique_list = []
    for repo in repo_list:
        if repo.full_name not in seen:
            seen.add(repo.full_name)
            unique_list.append(repo)
    return unique_list
Method to create unique list of repositories from the list of repositories given .
71
14
18,372
def minus(repo_list_a, repo_list_b):
    """Return the repositories (matched by full_name) that are in
    *repo_list_a* but not *repo_list_b*, without duplicates."""
    excluded = {repo.full_name for repo in repo_list_b}
    difference = []
    for repo in repo_list_a:
        if repo.full_name not in excluded:
            excluded.add(repo.full_name)
            difference.append(repo)
    return difference
Method to create a list of repositories such that the repository belongs to repo list a but not repo list b .
99
22
18,373
def __populate_repositories_of_interest(self, username):
    """Gather the repositories suggestions are based on: those starred by
    *username*, plus (in deep-dive mode) those starred by the users they
    follow."""
    # Handle to the user to whom repositories need to be suggested
    user = self.github.get_user(username)
    self.user_starred_repositories.extend(user.get_starred())
    if self.deep_dive:
        for followed in user.get_following():
            self.user_following_starred_repositories.extend(followed.get_starred())
Method to populate repositories which will be used to suggest repositories for the user . For this purpose we use two kinds of repositories .
137
25
18,374
def __get_interests(self):
    """Return the de-duplicated descriptions of every repository the
    authenticated user has shown interest in."""
    interesting = itertools.chain(
        self.user_starred_repositories,
        self.user_following_starred_repositories,
    )
    return list({repo.description for repo in interesting})
Method to procure description of repositories the authenticated user is interested in .
98
13
18,375
def __get_words_to_ignore(self):
    """Compile the set of words to ignore: English stopwords, git
    language names, and other words to avoid."""
    english_stopwords = stopwords.words("english")
    here = path.abspath(path.dirname(__file__))
    # Languages used by git repositories
    with open(path.join(here, "gitlang/languages.txt"), "r") as lang_file:
        git_languages = [line.strip() for line in lang_file]
    # Other words to avoid in git repositories
    with open(path.join(here, "gitlang/others.txt"), "r") as other_file:
        words_to_avoid = [line.strip() for line in other_file]
    return set(itertools.chain(english_stopwords, git_languages, words_to_avoid))
Compiles list of all words to ignore .
197
9
18,376
def __clean_and_tokenize(self, doc_list):
    """Clean and tokenize the given document list into lists of
    lowercase, meaningful word tokens (one token list per document)."""
    # Some repositories fill entire documentation in description. We ignore
    # such repositories for cleaner tokens.
    doc_list = filter(
        lambda x: x is not None and len(x) <= GitSuggest.MAX_DESC_LEN,
        doc_list,
    )
    cleaned_doc_list = list()
    # Regular expression to remove out all punctuations, numbers and other
    # un-necessary text substrings like emojis etc.
    tokenizer = RegexpTokenizer(r"[a-zA-Z]+")
    # Get stop words.
    stopwords = self.__get_words_to_ignore()
    # Get english words.
    dict_words = self.__get_words_to_consider()
    for doc in doc_list:
        # Lowercase doc.
        lower = doc.lower()
        # Tokenize removing numbers and punctuation.
        tokens = tokenizer.tokenize(lower)
        # Include meaningful words.
        tokens = [tok for tok in tokens if tok in dict_words]
        # Remove stopwords.
        tokens = [tok for tok in tokens if tok not in stopwords]
        # Filter Nones if any are introduced.
        tokens = [tok for tok in tokens if tok is not None]
        cleaned_doc_list.append(tokens)
    return cleaned_doc_list
Method to clean and tokenize the document list .
291
10
18,377
def __construct_lda_model(self):
    """Build the single-topic LDA model from the descriptions of
    repositories the authenticated user is interested in; stores the
    result on self.lda_model."""
    # Fetch descriptions of repos of interest to authenticated user.
    repos_of_interest = self.__get_interests()
    # Procure clean tokens from the descriptions.
    cleaned_tokens = self.__clean_and_tokenize(repos_of_interest)
    # If cleaned tokens are empty, it can cause an exception while
    # generating LDA. But tokens shouldn't be something meaningful as that
    # would mean we are suggesting repos without reason. Hence the random
    # string to ensure that LDA doesn't cause exception but the token
    # doesn't generate any suggestions either.
    if not cleaned_tokens:
        cleaned_tokens = [["zkfgzkfgzkfgzkfgzkfgzkfg"]]
    # Setup LDA requisites.
    dictionary = corpora.Dictionary(cleaned_tokens)
    corpus = [dictionary.doc2bow(text) for text in cleaned_tokens]
    # Generate LDA model
    self.lda_model = models.ldamodel.LdaModel(corpus, num_topics=1, id2word=dictionary, passes=10)
Method to create LDA model to procure list of topics from .
256
13
18,378
def __get_query_for_repos(self, term_count=5):
    """Build a repository search query from the top *term_count* terms of
    the LDA topic the user is interested in."""
    top_terms = self.lda_model.get_topic_terms(0, topn=term_count)
    return " ".join(self.lda_model.id2word[term_id] for term_id, _ in top_terms)
Method to procure query based on topics authenticated user is interested in .
92
13
18,379
def get_suggested_repositories(self):
    """Yield the suggested repositories for the user, most-starred first.

    The suggestion list is computed once and cached on the instance;
    subsequent calls just iterate the cached list.
    """
    if self.suggested_repositories is None:
        # Procure repositories to suggest to user.
        repository_set = list()
        for term_count in range(5, 2, -1):
            query = self.__get_query_for_repos(term_count=term_count)
            repository_set.extend(self.__get_repos_for_query(query))
        # Remove repositories authenticated user is already interested in.
        catchy_repos = GitSuggest.minus(repository_set, self.user_starred_repositories)
        # Filter out repositories with too long descriptions. This is a
        # measure to weed out spammy repositories.
        filtered_repos = []
        if len(catchy_repos) > 0:
            for repo in catchy_repos:
                if (repo is not None and repo.description is not None and len(repo.description) <= GitSuggest.MAX_DESC_LEN):
                    filtered_repos.append(repo)
        # Present the repositories, highly starred to not starred.
        filtered_repos = sorted(filtered_repos, key=attrgetter("stargazers_count"), reverse=True)
        self.suggested_repositories = GitSuggest.get_unique_repositories(filtered_repos)
    # Return an iterator to help user fetch the repository listing.
    for repository in self.suggested_repositories:
        yield repository
Method to procure suggested repositories for the user .
322
9
18,380
def guess_type(s):
    """Attempt to convert the string *s* into an int or float; commas
    (thousands separators) are stripped first.

    Returns *s* unchanged when no numeric conversion succeeds.
    """
    candidate = s.replace(',', '')  # remove comma from potential numbers
    for convert in (int, float):
        try:
            return convert(candidate)
        except ValueError:
            pass
    return s
attempt to convert string value into numeric type
51
9
18,381
def parse(self, node):
    """Yield Field objects for the given node.

    Nested Field parts are passed through with the current attrs
    attached; loose text fragments are joined into one typed Field. A
    node with no usable content yields a single empty Field.
    """
    self._attrs = {}
    fragments = []
    produced = False
    for part in self._read_parts(node):
        if isinstance(part, Field):
            produced = True
            part.attrs = self._attrs
            yield part
        else:
            fragments.append(ustr(part).strip(' \n\t'))
    text = ' '.join(piece for piece in fragments if piece)
    if text:
        produced = True
        yield Field(node, guess_type(text), self._attrs)
    if not produced:
        yield Field(node, "", self._attrs)
Return generator yielding Field objects for a given node
143
9
18,382
def parse(self, *nodes):
    """Parse one or more tr nodes, yielding non-null wikitables.Row
    objects."""
    for node in nodes:
        if not node.contents:
            continue
        row = self._parse(node)
        if not row.is_null:
            yield row
Parse one or more tr nodes yielding wikitables . Row objects
40
14
18,383
def _find_header_row(self):
    """Pick the header row: the tr holding the most <th> elements.

    That row is removed from the pending rows and its <th> tags are
    returned; returns None when no row contains any <th>.
    """
    best_count = 0
    best_idx = 0
    for idx, tr in enumerate(self._tr_nodes):
        th_count = len(tr.contents.filter_tags(matches=ftag('th')))
        if th_count > best_count:
            best_count = th_count
            best_idx = idx
    if not best_count:
        return
    self._log('found header at row %d (%d <th> elements)' % (best_idx, best_count))
    header_row = self._tr_nodes.pop(best_idx)
    return header_row.contents.filter_tags(matches=ftag('th'))
Evaluate all rows and determine header position based on greatest number of th tagged elements
166
17
18,384
def _make_default_header(self):
    """Return generic placeholder column names ('column0', ...) sized to
    the widest row's <td> count."""
    widest = 0
    for tr in self._tr_nodes:
        widest = max(widest, len(tr.contents.filter_tags(matches=ftag('td'))))
    self._log('creating default header (%d columns)' % widest)
    return ['column%d' % n for n in range(0, widest)]
Return a generic placeholder header based on the tables column count
115
11
18,385
def fetch_page(self, title, method='GET'):
    """Query the wiki API for the page *title* and return its page dict
    (revisions with content).

    Raises ArticleNotFound when no article matches; HTTP errors are
    raised via raise_for_status.
    """
    params = {
        'prop': 'revisions',
        'format': 'json',
        'action': 'query',
        'explaintext': '',
        'titles': title,
        'rvprop': 'content',
    }
    response = self.request(method, self.base_url, params=params)
    response.raise_for_status()
    pages = response.json()["query"]["pages"]
    # use key from first result in 'pages' array
    pageid = list(pages.keys())[0]
    if pageid == '-1':
        raise ArticleNotFound('no matching articles returned')
    return pages[pageid]
Query for page by title
165
5
18,386
def print_stack(pid, include_greenlet=False, debugger=None, verbose=False):
    """Attach a debugger (gdb, or lldb on macOS) to the running Python
    process *pid* and print the stacks of its threads (and optionally
    greenlets) to stdout.

    The injected commands write into a temp file which is then streamed
    to stdout; the debugger's own output is shown only with *verbose*.
    """
    # TextIOWrapper of Python 3 is so strange.
    sys_stdout = getattr(sys.stdout, 'buffer', sys.stdout)
    sys_stderr = getattr(sys.stderr, 'buffer', sys.stderr)
    make_args = make_gdb_args
    environ = dict(os.environ)
    if (debugger == 'lldb' or (debugger is None and platform.system().lower() == 'darwin')):
        make_args = make_lldb_args
        # fix the PATH environment variable for using built-in Python with lldb
        environ['PATH'] = '/usr/bin:%s' % environ.get('PATH', '')
    tmp_fd, tmp_path = tempfile.mkstemp()
    # The target process (possibly another user) must be able to write the file
    os.chmod(tmp_path, 0o777)
    commands = []
    commands.append(FILE_OPEN_COMMAND)
    commands.extend(UTILITY_COMMANDS)
    commands.extend(THREAD_STACK_COMMANDS)
    if include_greenlet:
        commands.extend(GREENLET_STACK_COMMANDS)
    commands.append(FILE_CLOSE_COMMAND)
    command = r';'.join(commands)
    args = make_args(pid, command % tmp_path)
    process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = process.communicate()
    if verbose:
        sys_stderr.write(b'Standard Output:\n%s\n' % out)
        sys_stderr.write(b'Standard Error:\n%s\n' % err)
        sys_stderr.flush()
    # Stream whatever the injected code wrote into the temp file
    for chunk in iter(functools.partial(os.read, tmp_fd, 1024), b''):
        sys_stdout.write(chunk)
    sys_stdout.write(b'\n')
    sys_stdout.flush()
Executes a file in a running Python process .
469
10
18,387
def cli_main(pid, include_greenlet, debugger, verbose):
    """CLI entry point: print the stack of a running Python process."""
    try:
        print_stack(pid, include_greenlet, debugger, verbose)
    except DebuggerNotFound as exc:
        click.echo('DebuggerNotFound: %s' % exc.args[0], err=True)
        click.get_current_context().exit(1)
Print stack of python process .
84
6
18,388
def forward_algo(self, observations):
    """Compute the likelihood of an observation sequence (forward algorithm).

    Uses the model's start, transition and emission probabilities and
    returns the total probability summed over all end states.
    """
    # alpha holds, per state, the probability of the prefix seen so far
    first_ind = self.obs_map[observations[0]]
    alpha = np.multiply(np.transpose(self.em_prob[:, first_ind]),
                        self.start_prob)
    # propagate alpha through the remaining observations
    for symbol in observations[1:]:
        col = self.obs_map[symbol]
        alpha = np.multiply(np.dot(alpha, self.trans_prob),
                            np.transpose(self.em_prob[:, col]))
    # total sequence probability is the sum over all end states
    return alpha.sum()
Finds the probability of an observation sequence for given model parameters
236
12
18,389
def viterbi(self, observations):
    """Return the most probable hidden-state sequence for *observations*
    (Viterbi algorithm), as a list of state names via self.state_map.

    delta is renormalized at each step to avoid underflow on long
    sequences; this does not change which path is maximal.
    """
    # Find total states, observations
    total_stages = len(observations)
    num_states = len(self.states)
    # initialize data
    # Path stores the state sequence giving maximum probability
    old_path = np.zeros((total_stages, num_states))
    new_path = np.zeros((total_stages, num_states))
    # Find initial delta
    # Map observation to an index
    # delta[s] stores the probability of most probable path ending in state 's'
    ob_ind = self.obs_map[observations[0]]
    delta = np.multiply(np.transpose(self.em_prob[:, ob_ind]), self.start_prob)
    # Scale delta (renormalize to guard against underflow)
    delta = delta / np.sum(delta)
    # initialize path: stage 0 trivially ends in each state itself
    old_path[0, :] = [i for i in range(num_states)]
    # Find delta[t][x] for each state 'x' at the iteration 't'
    # delta[t][x] can be found using delta[t-1][x] and taking the maximum possible path
    for curr_t in range(1, total_stages):
        # Map observation to an index
        ob_ind = self.obs_map[observations[curr_t]]
        # Find temp and take max along each row to get delta
        temp = np.multiply(np.multiply(delta, self.trans_prob.transpose()), self.em_prob[:, ob_ind])
        # Update delta and scale it
        delta = temp.max(axis=1).transpose()
        delta = delta / np.sum(delta)
        # Find state which is most probable using argmax
        # Convert to a list for easier processing
        max_temp = temp.argmax(axis=1).transpose()
        max_temp = np.ravel(max_temp).tolist()
        # Update path: copy each state's best predecessor path, then append itself
        for s in range(num_states):
            new_path[:curr_t, s] = old_path[0:curr_t, max_temp[s]]
        new_path[curr_t, :] = [i for i in range(num_states)]
        # copy() so the next iteration's reads don't alias this write buffer
        old_path = new_path.copy()
    # Find the state in last stage, giving maximum probability
    final_max = np.argmax(np.ravel(delta))
    best_path = old_path[:, final_max].tolist()
    best_path_map = [self.state_map[i] for i in best_path]
    return best_path_map
The probability of occurrence of the observation sequence
577
10
18,390
def train_hmm(self, observation_list, iterations, quantities):
    """Run the Baum-Welch algorithm and re-estimate the model parameters.

    :param observation_list: list of observation sequences.
    :param iterations: maximum number of re-estimation passes.
    :param quantities: per-sequence weights used when combining the
        statistics gathered from each sequence.
    :returns: tuple (em_prob, trans_prob, start_prob) of the updated
        matrices; also mutates the instance attributes in place. Stops
        early when the weighted log probability improves by <= 1e-7.
    """
    obs_size = len(observation_list)
    prob = float('inf')
    q = quantities
    # Train the model 'iteration' number of times
    # store em_prob and trans_prob copies since you should use same values for one loop
    for i in range(iterations):
        emProbNew = np.asmatrix(np.zeros((self.em_prob.shape)))
        transProbNew = np.asmatrix(np.zeros((self.trans_prob.shape)))
        startProbNew = np.asmatrix(np.zeros((self.start_prob.shape)))
        for j in range(obs_size):
            # re-assign values based on weight
            emProbNew = emProbNew + q[j] * self._train_emission(observation_list[j])
            transProbNew = transProbNew + q[j] * self._train_transition(observation_list[j])
            startProbNew = startProbNew + q[j] * self._train_start_prob(observation_list[j])
        # Normalizing each row so the matrices stay stochastic
        em_norm = emProbNew.sum(axis=1)
        trans_norm = transProbNew.sum(axis=1)
        start_norm = startProbNew.sum(axis=1)
        emProbNew = emProbNew / em_norm.transpose()
        startProbNew = startProbNew / start_norm.transpose()
        transProbNew = transProbNew / trans_norm.transpose()
        self.em_prob, self.trans_prob = emProbNew, transProbNew
        self.start_prob = startProbNew
        # convergence test: stop when improvement drops below the threshold
        if prob - self.log_prob(observation_list, quantities) > 0.0000001:
            prob = self.log_prob(observation_list, quantities)
        else:
            return self.em_prob, self.trans_prob, self.start_prob
    return self.em_prob, self.trans_prob, self.start_prob
Runs the Baum Welch Algorithm and finds the new model parameters
489
14
18,391
def log_prob(self, observations_list, quantities):
    """Return the weighted log probability of a list of observation
    sequences, computed from the forward-pass scaling factors.
    """
    total = 0
    for idx, sequence in enumerate(observations_list):
        _, c_scale = self._alpha_cal(sequence)
        # log P(sequence) is the negative sum of the log scaling factors
        total = total - quantities[idx] * np.sum(np.log(c_scale))
    return total
Finds the weighted log probability of a list of observation sequences
73
12
18,392
def __fetch_data(self, url):
    """Helper: fetch and parse the XML at *url* (API key appended).

    :returns: the parsed XML root element.
    :raises ValueError: with the server's error message when the
        request fails with an HTTP error.
    """
    url += '&api_key=' + self.api_key
    try:
        response = urlopen(url)
        try:
            root = ET.fromstring(response.read())
        finally:
            # fix: close the connection instead of leaking it
            response.close()
    except HTTPError as exc:
        root = ET.fromstring(exc.read())
        raise ValueError(root.get('message'))
    return root
helper function for fetching data given a request URL
82
11
18,393
def _parse ( self , date_str , format = '%Y-%m-%d' ) : rv = pd . to_datetime ( date_str , format = format ) if hasattr ( rv , 'to_pydatetime' ) : rv = rv . to_pydatetime ( ) return rv
helper function for parsing FRED date string into datetime
77
12
18,394
def get_series_first_release(self, series_id):
    """Get first-release data for a Fred series id.

    Ignores any later revision to each observation: e.g. US GDP for
    Q1 2014 was first released as 17149.6 and later revised; only the
    first release per date is kept.

    :returns: Series of first-release values indexed by date.
    """
    releases = self.get_series_all_releases(series_id)
    # the first row within each date group is the earliest release
    earliest = releases.groupby('date').head(1)
    return earliest.set_index('date')['value']
Get first - release data for a Fred series id . This ignores any revision to the data series . For instance The US GDP for Q1 2014 was first released to be 17149 . 6 and then later revised to 17101 . 3 and 17016 . 0 . This will ignore revisions after the first release .
72
62
18,395
def get_series_as_of_date(self, series_id, as_of_date):
    """Get the data for a Fred series id as known on a particular date.

    Includes any revision published on or before *as_of_date* and
    excludes revisions published after it.

    :returns: DataFrame of rows whose realtime_start <= as_of_date.
    """
    cutoff = pd.to_datetime(as_of_date)
    releases = self.get_series_all_releases(series_id)
    return releases[releases['realtime_start'] <= cutoff]
Get latest data for a Fred series id as known on a particular date . This includes any revision to the data series before or on as_of_date but ignores any revision on dates after as_of_date .
83
44
18,396
def get_series_vintage_dates(self, series_id):
    """Get a list of vintage dates for a series.

    Vintage dates are the dates in history when a series' data values
    were revised or new data values were released.

    :returns: list of parsed datetime objects.
    :raises ValueError: if no vintage dates exist for the series.
    """
    url = "%s/series/vintagedates?series_id=%s" % (self.root_url, series_id)
    root = self.__fetch_data(url)
    if root is None:
        raise ValueError('No vintage date exists for series id: ' + series_id)
    # fix: iterate the element directly; Element.getchildren() was
    # removed in Python 3.9
    return [self._parse(child.text) for child in root]
Get a list of vintage dates for a series . Vintage dates are the dates in history when a series data values were revised or new data values were released .
114
31
18,397
def __do_series_search(self, url):
    """Helper: make one HTTP request for series-search data and parse
    the results into a DataFrame.

    :returns: tuple (data, num_results_total) where *data* is a
        DataFrame indexed by series id (None when the response is
        empty) and *num_results_total* is the server-side count, which
        can exceed the number of rows actually returned.
    """
    root = self.__fetch_data(url)

    series_ids = []
    data = {}

    # metadata fields copied verbatim from each series element
    # (hoisted out of the loop; it is loop-invariant)
    fields = ["realtime_start", "realtime_end", "title", "observation_start",
              "observation_end", "frequency", "frequency_short", "units",
              "units_short", "seasonal_adjustment", "seasonal_adjustment_short",
              "last_updated", "popularity", "notes"]

    num_results_returned = 0  # number of results returned in this HTTP request
    num_results_total = int(root.get('count'))  # total number of results, this can be larger than number of results returned
    # fix: iterate the element directly; Element.getchildren() was
    # removed in Python 3.9
    for child in root:
        num_results_returned += 1
        series_id = child.get('id')
        series_ids.append(series_id)
        data[series_id] = {"id": series_id}
        for field in fields:
            data[series_id][field] = child.get(field)

    if num_results_returned > 0:
        data = pd.DataFrame(data, columns=series_ids).T
        # parse datetime columns
        for field in ["realtime_start", "realtime_end", "observation_start",
                      "observation_end", "last_updated"]:
            data[field] = data[field].apply(self._parse, format=None)
        # set index name
        data.index.name = 'series id'
    else:
        data = None
    return data, num_results_total
helper function for making one HTTP request for data and parsing the returned results into a DataFrame
373
19
18,398
def __get_search_results(self, url, limit, order_by, sort_order, filter):
    """Helper: get search results up to *limit* results (0 means all).

    The Fred HTTP API truncates each response to at most
    self.max_results_per_request results, so this may issue multiple
    requests and concatenate the pages.

    :raises ValueError: for an invalid order_by or sort_order option,
        or a malformed filter tuple.
    """
    order_by_options = ['search_rank', 'series_id', 'title', 'units',
                        'frequency', 'seasonal_adjustment', 'realtime_start',
                        'realtime_end', 'last_updated', 'observation_start',
                        'observation_end', 'popularity']
    if order_by is not None:
        if order_by in order_by_options:
            url = url + '&order_by=' + order_by
        else:
            raise ValueError('%s is not in the valid list of order_by options: %s' % (order_by, str(order_by_options)))

    if filter is not None:
        if len(filter) == 2:
            url = url + '&filter_variable=%s&filter_value=%s' % (filter[0], filter[1])
        else:
            raise ValueError('Filter should be a 2 item tuple like (filter_variable, filter_value)')

    sort_order_options = ['asc', 'desc']
    if sort_order is not None:
        if sort_order in sort_order_options:
            url = url + '&sort_order=' + sort_order
        else:
            raise ValueError('%s is not in the valid list of sort_order options: %s' % (sort_order, str(sort_order_options)))

    data, num_results_total = self.__do_series_search(url)
    if data is None:
        return data

    if limit == 0:
        max_results_needed = num_results_total
    else:
        max_results_needed = limit

    if max_results_needed > self.max_results_per_request:
        # page through the remaining results, collecting each frame
        frames = [data]
        for i in range(1, max_results_needed // self.max_results_per_request + 1):
            offset = i * self.max_results_per_request
            next_data, _ = self.__do_series_search(url + '&offset=' + str(offset))
            frames.append(next_data)
        # fix: DataFrame.append was removed in pandas 2.0; use pd.concat
        data = pd.concat(frames)
    return data.head(max_results_needed)
helper function for getting search results up to specified limit on the number of results . The Fred HTTP API truncates to 1000 results per request so this may issue multiple HTTP requests to obtain more available data .
492
41
18,399
def search(self, text, limit=1000, order_by=None, sort_order=None, filter=None):
    """Do a fulltext search for series in the Fred dataset.

    :returns: DataFrame with information about matching series.
    """
    search_url = "%s/series/search?search_text=%s&" % (self.root_url,
                                                       quote_plus(text))
    return self.__get_search_results(search_url, limit, order_by,
                                     sort_order, filter)
Do a fulltext search for series in the Fred dataset . Returns information about matching series in a DataFrame .
87
22