idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
37,400
def energy(self):
    """Compute the global energy for the current joint state of all nodes.

    Updates ``self.E`` and keeps ``self.low_energies`` (ascending) and
    ``self.high_energies`` (descending) up to date, then returns ``self.E``.
    """
    s, b, W, N = self.state, self.b, self.W, self.N
    # Bias contribution plus pairwise couplings over the upper triangle (i < j).
    pairwise = sum(s[i] * s[j] * W[i, j]
                   for i in range(N) for j in range(i + 1, N))
    self.E = -sum(s * b) - pairwise
    # Replace the sentinel slot with the fresh energy and re-sort both lists.
    self.low_energies[-1] = self.E
    self.low_energies.sort()
    self.high_energies[-1] = self.E
    self.high_energies.sort()
    self.high_energies = self.high_energies[::-1]
    return self.E
Compute the global energy for the current joint state of all nodes
37,401
def translate(self, text, to_template='{name} ({url})', from_template=None,
              name_matcher=None, url_matcher=None):
    """Translate hyperlinks into printable book style for Manning Publishing.

    Thin wrapper that forwards all arguments to ``self.replace``.
    """
    return self.replace(
        text,
        to_template=to_template,
        from_template=from_template,
        name_matcher=name_matcher,
        url_matcher=url_matcher,
    )
Translate hyperlinks into printable book style for Manning Publishing
37,402
def main(dialogpath=None):
    """Parse the state-transition graph for a set of dialog-definition tables
    to find and fix dead ends.

    Args:
        dialogpath: path to the dialog tables; when None it is taken from the
            command line via ``parse_args()``.

    Returns:
        Whatever ``clean_csvs`` returns for the resolved path.
    """
    if dialogpath is None:
        args = parse_args()
        dialogpath = args.dialogpath
    # FIX: the original else-branch referenced ``args.dialogpath`` even though
    # ``args`` was only defined in the if-branch (NameError). Normalize once here.
    dialogpath = os.path.abspath(os.path.expanduser(dialogpath))
    return clean_csvs(dialogpath=dialogpath)
Parse the state transition graph for a set of dialog - definition tables to find and fix dead ends
37,403
def prepare_data_maybe_download(directory):
    """Download and unpack the Ubuntu dialogs corpus into *directory* if necessary."""
    filename = 'ubuntu_dialogs.tgz'
    url = 'http://cs.mcgill.ca/~jpineau/datasets/ubuntu-corpus-1.0/ubuntu_dialogs.tgz'
    dialogs_path = os.path.join(directory, 'dialogs')
    # Presence of this file is used as the "already unpacked" marker.
    if not os.path.exists(os.path.join(directory, "10", "1.tst")):
        archive_path = os.path.join(directory, filename)
        if not os.path.exists(archive_path):
            print("Downloading %s to %s" % (url, archive_path))
            filepath, _ = urllib.request.urlretrieve(url, archive_path)
            # FIX: was a Python 2 print statement (`print "..." + filepath`),
            # a SyntaxError under Python 3.
            print("Successfully downloaded " + filepath)
        if not os.path.exists(dialogs_path):
            print("Unpacking dialogs ...")
            with tarfile.open(archive_path) as tar:
                tar.extractall(path=directory)
            print("Archive unpacked.")
    return
Download and unpack dialogs if necessary .
37,404
def fib(n):
    """Return the n-th Fibonacci number (1-indexed: fib(1) == fib(2) == 1)."""
    assert n > 0
    prev, curr = 1, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return prev
Fibonacci example function
37,405
def main(args):
    """Main entry point allowing external calls.

    Parses *args*, configures logging, and prints the requested Fibonacci number.
    """
    parsed = parse_args(args)
    setup_logging(parsed.loglevel)
    _logger.debug("Starting crazy calculations...")
    answer = fib(parsed.n)
    print("The {}-th Fibonacci number is {}".format(parsed.n, answer))
    _logger.info("Script ends here")
Main entry point allowing external calls
37,406
def optimize_feature_power(df, output_column_name=None,
                           exponents=(2., 1., .8, .5, .25, .1, .01)):
    """Compute the correlation coefficient for various exponential scalings of
    the output feature against every input feature.

    Args:
        df: DataFrame; the last column is the output unless overridden.
        output_column_name: name of the target column (default: last column).
        exponents: powers to raise the output column to.
            FIX: was a mutable list default; a tuple is safe and backward-compatible.

    Returns:
        DataFrame of correlations, indexed by power, one column per input feature.
    """
    if output_column_name is None:
        output_column_name = list(df.columns)[-1]
    input_column_names = [c for c in df.columns if c != output_column_name]
    results = np.zeros((len(exponents), len(input_column_names)))
    for rownum, exponent in enumerate(exponents):
        for colnum, column_name in enumerate(input_column_names):
            results[rownum, colnum] = (df[output_column_name] ** exponent).corr(df[column_name])
    results = pd.DataFrame(results, columns=input_column_names,
                           index=pd.Series(exponents, name='power'))
    return results
Plot the correlation coefficient for various exponential scalings of input features
37,407
def representative_sample(X, num_samples, save=False):
    """Sample vectors in X, preferring edge cases and vectors farthest from
    other vectors already in the sample set (approximate-NN based).

    Args:
        X: 2-D array or DataFrame of row vectors.
        num_samples: number of rows to select.
        save: False, True (temp file), or a filename for the Annoy index.

    Returns:
        ndarray of selected row numbers (unfilled slots remain -1).
    """
    X = X.values if hasattr(X, 'values') else np.array(X)
    N, M = X.shape
    rownums = np.arange(N)
    np.random.shuffle(rownums)
    idx = AnnoyIndex(M)
    for i, row in enumerate(X):
        idx.add_item(i, row)
    idx.build(int(np.log2(N)) + 1)
    if save:
        if isinstance(save, str):  # FIX: `basestring` is Python 2 only
            idxfilename = save
        else:
            idxfile = tempfile.NamedTemporaryFile(delete=False)
            idxfile.close()
            idxfilename = idxfile.name
        idx.save(idxfilename)
        idx = AnnoyIndex(M)
        # FIX: was idx.load(idxfile.name) — NameError when `save` was a filename
        idx.load(idxfilename)
    samples = -1 * np.ones(shape=(num_samples,), dtype=int)
    samples[0] = rownums[0]
    j, num_nns = 0, min(1000, int(num_samples / 2. + 1))
    for i in rownums:
        if i in samples:
            continue
        nns = idx.get_nns_by_item(i, num_nns)
        samples[j + 1] = np.setdiff1d(nns, samples)[-1]
        # FIX: was `len(num_nns)` — TypeError, num_nns is an int; also keep it int.
        if num_nns < num_samples / 3.:
            num_nns = min(N, int(1.3 * num_nns))
        j += 1
    return samples
Sample vectors in X preferring edge cases and vectors farthest from other vectors in sample set
37,408
def cosine_sim(vec1, vec2):
    """Cosine similarity of two vectors given as dicts.

    Since our vectors are dictionaries, convert them to lists for easier math.
    """
    values1 = [val for val in vec1.values()]
    values2 = [val for val in vec2.values()]
    dot_prod = 0
    for pos, component in enumerate(values1):
        dot_prod += component * values2[pos]
    mag_1 = math.sqrt(sum(x ** 2 for x in values1))
    mag_2 = math.sqrt(sum(x ** 2 for x in values2))
    return dot_prod / (mag_1 * mag_2)
Since our vectors are dictionaries lets convert them to lists for easier mathing .
37,409
def fit(self, X, y):
    """Compute least-squares slope and intercept for all (X, y) pairs.

    Stores ``self.slope`` and ``self.intercept`` and returns ``self``.
    """
    n = float(len(X))
    sum_x, sum_y = X.sum(), y.sum()
    sum_xy = (X * y).sum()
    sum_xx = (X ** 2).sum()
    # Classic closed-form simple linear regression.
    denominator = sum_xx - (sum_x * sum_x) / n
    self.slope = (sum_xy - (sum_x * sum_y) / n) / denominator
    self.intercept = sum_y / n - self.slope * (sum_x / n)
    return self
Compute average slope and intercept for all X y pairs
37,410
def looks_like_url(url):
    """Simplified check to see if the text appears to be a URL.

    Returns True only for string input shorter than 1024 chars that matches
    the module-level ``cre_url`` regex.
    """
    # FIX: the original performed the same isinstance() check twice in a row;
    # a single combined guard is equivalent.
    return (isinstance(url, basestring)
            and len(url) < 1024
            and bool(cre_url.match(url)))
Simplified check to see if the text appears to be a URL .
37,411
def try_parse_url(url):
    """Use urlparse to try to parse a URL, returning None on failure."""
    if len(url.strip()) < 4:
        logger.info('URL too short: {}'.format(url))
        return None
    try:
        parsed = urlparse(url)
    except ValueError:
        logger.info('Parse URL ValueError: {}'.format(url))
        return None
    if parsed.scheme:
        return parsed
    # No scheme found: retry assuming plain http.
    try:
        parsed = urlparse('http://' + parsed.geturl())
    except ValueError:
        logger.info('Invalid URL for assumed http scheme: urlparse("{}") from "{}" '.format(
            'http://' + parsed.geturl(), url))
        return None
    if not parsed.scheme:
        logger.info('Unable to guess a scheme for URL: {}'.format(url))
        return None
    return parsed
Use urlparse to try to parse URL returning None on exception
37,412
def get_url_filemeta(url):
    """Request headers for the page at *url* and return a dict with the url,
    filename, hostname, path, username, and remote size — or None on failure.

    FTP URLs are delegated to ``get_ftp_filemeta``.
    """
    parsed_url = try_parse_url(url)
    if parsed_url is None:
        return None
    if parsed_url.scheme.startswith('ftp'):
        return get_ftp_filemeta(parsed_url)
    url = parsed_url.geturl()
    try:
        r = requests.get(url, stream=True, allow_redirects=True, timeout=5)
        remote_size = r.headers.get('Content-Length', -1)
        return dict(url=url, hostname=parsed_url.hostname, path=parsed_url.path,
                    username=parsed_url.username, remote_size=remote_size,
                    filename=os.path.basename(parsed_url.path))
    except ConnectionError:
        return None
    except (InvalidURL, InvalidSchema, InvalidHeader, MissingSchema):
        return None
    # FIX: removed the unreachable trailing `return None` — every path above
    # already returns.
Request HTML for the page at the URL indicated and return the url filename and remote size
37,413
def save_response_content(response, filename='data.csv',
                          destination=os.path.curdir, chunksize=32768):
    """Stream a requests response to disk one chunk at a time.

    Args:
        response: a streaming ``requests`` response object.
        filename: output filename; if it contains a path separator it is used
            as the full destination path.
        destination: directory joined with *filename* when it is bare.
        chunksize: bytes per chunk (falsy values fall back to 32768).

    Returns:
        The expanded full destination path written to.
    """
    chunksize = chunksize or 32768
    if os.path.sep in filename:
        full_destination_path = filename
    else:
        full_destination_path = os.path.join(destination, filename)
    full_destination_path = expand_filepath(full_destination_path)
    with open(full_destination_path, "wb") as f:
        # FIX: the original iterated with the module global CHUNK_SIZE,
        # silently ignoring the `chunksize` parameter it just computed.
        for chunk in tqdm(response.iter_content(chunksize)):
            if chunk:  # filter out keep-alive chunks
                f.write(chunk)
    return full_destination_path
For streaming response from requests download the content one CHUNK at a time
37,414
def download_file_from_google_drive(driveid, filename=None, destination=os.path.curdir):
    """Download script for Google Drive shared links.

    Accepts either a bare drive id or a full share URL containing ``&id=`` or
    ``?id=``. Returns the absolute path of the destination directory.
    """
    if '&id=' in driveid:
        driveid = driveid.split('&id=')[-1]
    if '?id=' in driveid:
        driveid = driveid.split('?id=')[-1]
    URL = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(URL, params={'id': driveid}, stream=True)
    # Large files require confirming the "can't virus-scan" interstitial.
    token = get_response_confirmation_token(response)
    if token:
        params = {'id': driveid, 'confirm': token}
        response = session.get(URL, params=params, stream=True)
    filename = filename or get_url_filename(driveid=driveid)
    # FIX: was `filename=fileanme` — a NameError typo.
    full_destination_path = save_response_content(response, filename=filename,
                                                  destination=destination)
    return os.path.abspath(destination)
Download script for google drive shared links
37,415
def find_greeting(s):
    """Return the greeting string 'Hi', 'Hello', or 'Yo' if it occurs at the
    beginning of the string, else None.
    """
    # FIX: guard empty input — the original raised IndexError on '' (s[0])
    # and on a bare 'Y' (s[1]).
    if not s:
        return None
    if s[0] == 'H':
        if s[:3] in ('Hi', 'Hi ', 'Hi,', 'Hi!'):
            return s[:2]
        elif s[:6] in ('Hello', 'Hello ', 'Hello,', 'Hello!'):
            return s[:5]
    elif s[0] == 'Y':
        if s[1:2] == 'o' and s[:3] in ('Yo', 'Yo,', 'Yo ', 'Yo!'):
            return s[:2]
    return None
Return the greeting string Hi Hello or Yo if it occurs at the beginning of a string
37,416
def file_to_list(in_file):
    """Read an open file into a list of its non-empty, non-comment lines.

    Lines are stripped of the trailing newline; lines starting with '#' and
    blank lines are skipped.
    """
    kept = []
    for raw_line in in_file:
        stripped = raw_line.strip('\n')
        if stripped and stripped[0] != '#':
            kept.append(stripped)
    return kept
Reads file into list
37,417
def add_flag_values(self, entry, flag):
    """Add a flag value (entry) to the applicable compound, if this rule
    tracks *flag*; unknown flags are ignored.
    """
    try:
        self.flags[flag].append(entry)
    except KeyError:
        pass  # flag not tracked by this rule
Adds flag value to applicable compounds
37,418
def get_regex(self):
    """Generate and return the compound regular expression.

    Quantifier flags ('?', '*') pass through; every other flag expands to an
    alternation group of its tracked values.
    """
    pieces = []
    for flag in self.compound:
        if flag in ('?', '*'):
            pieces.append(flag)
        else:
            pieces.append('(' + '|'.join(self.flags[flag]) + ')')
    return ''.join(pieces)
Generates and returns compound regular expression
37,419
# Parses a Hunspell-style dictionary according to the affix rules.
# NOTE(review): this line is a whitespace-flattened one-liner preserved
# byte-for-byte — the original indentation (and therefore the exact block
# nesting) was lost in extraction, so it is kept as-is rather than rebuilt.
# From what is visible: each line is split on '/' into word and flag string;
# compound flags are forwarded to self.aff.compound_rules via add_flag_values;
# other flags look up affix rules and append either add/sub keys (when
# self.format == "addsub") or derivatives to self.words[word]; finally each
# compound rule's regex is collected into self.regex_compounds.
def __parse_dict ( self ) : i = 0 lines = self . lines for line in lines : line = line . split ( '/' ) word = line [ 0 ] flags = line [ 1 ] if len ( line ) > 1 else None self . num_words += 1 if flags != None : for flag in flags : if flag in self . aff . compound_flags or flag == self . aff . only_in_compound_flag : for rule in self . aff . compound_rules : rule . add_flag_values ( word , flag ) else : if self . aff . no_suggest_flag == flag : pass else : affix_rule_entries = self . aff . affix_rules [ flag ] for i in range ( len ( affix_rule_entries ) ) : rule = affix_rule_entries [ i ] if rule . meets_condition ( word ) : if word not in self . words : self . words [ word ] = [ ] self . num_words += 1 if self . format == "addsub" : add_sub = rule . generate_add_sub ( ) if add_sub not in self . keys : self . keys . append ( add_sub ) if self . key : self . words [ word ] . append ( str ( self . keys . index ( add_sub ) ) ) else : self . words [ word ] . append ( rule . generate_add_sub ( ) ) else : self . words [ word ] . append ( rule . create_derivative ( word ) ) else : self . words [ word ] = [ ] for rule in self . aff . compound_rules : self . regex_compounds . append ( rule . get_regex ( ) )
Parses dictionary with according rules
37,420
# Walk the aclImdb directory tree to compile a DataFrame of movie review text
# labeled with 1-10 star ratings (parsed from the review filenames).
# NOTE(review): flattened one-liner kept byte-for-byte; indentation was lost.
# NOTE(review): the index2/rating columns are built from os.listdir() order
# while df rows come from the urls_*.txt file — alignment between the two is
# assumed, not verified here; confirm against the original nlpia source.
def load_imdb_df ( dirpath = os . path . join ( BIGDATA_PATH , 'aclImdb' ) , subdirectories = ( ( 'train' , 'test' ) , ( 'pos' , 'neg' , 'unsup' ) ) ) : dfs = { } for subdirs in tqdm ( list ( product ( * subdirectories ) ) ) : urlspath = os . path . join ( dirpath , subdirs [ 0 ] , 'urls_{}.txt' . format ( subdirs [ 1 ] ) ) if not os . path . isfile ( urlspath ) : if subdirs != ( 'test' , 'unsup' ) : logger . warning ( 'Unable to find expected IMDB review list of URLs: {}' . format ( urlspath ) ) continue df = pd . read_csv ( urlspath , header = None , names = [ 'url' ] ) df [ 'url' ] = series_strip ( df . url , endswith = '/usercomments' ) textsdir = os . path . join ( dirpath , subdirs [ 0 ] , subdirs [ 1 ] ) if not os . path . isdir ( textsdir ) : logger . warning ( 'Unable to find expected IMDB review text subdirectory: {}' . format ( textsdir ) ) continue filenames = [ fn for fn in os . listdir ( textsdir ) if fn . lower ( ) . endswith ( '.txt' ) ] df [ 'index0' ] = subdirs [ 0 ] df [ 'index1' ] = subdirs [ 1 ] df [ 'index2' ] = np . array ( [ int ( fn [ : - 4 ] . split ( '_' ) [ 0 ] ) for fn in filenames ] ) df [ 'rating' ] = np . array ( [ int ( fn [ : - 4 ] . split ( '_' ) [ 1 ] ) for fn in filenames ] ) texts = [ ] for fn in filenames : with ensure_open ( os . path . join ( textsdir , fn ) ) as f : texts . append ( f . read ( ) ) df [ 'text' ] = np . array ( texts ) del texts df . set_index ( 'index0 index1 index2' . split ( ) , inplace = True ) df . sort_index ( inplace = True ) dfs [ subdirs ] = df return pd . concat ( dfs . values ( ) )
Walk directory tree starting at path to compile a DataFrame of movie review text labeled with their 1 - 10 star ratings
37,421
def load_glove(filepath, batch_size=1000, limit=None, verbose=True):
    r"""Load a pretrained GloVe word-vector model into a gensim KeyedVectors.

    Args:
        filepath: path to a GloVe-format text file.
        batch_size: retained for backward compatibility (unused — see FIX note).
        limit: optional cap on the number of vectors loaded.
        verbose: show a tqdm progress bar.

    Returns:
        A populated ``KeyedVectors`` instance.
    """
    num_dim = isglove(filepath)
    tqdm_prog = tqdm if verbose else no_tqdm
    wv = KeyedVectors(num_dim)
    if limit:
        vocab_size = int(limit)
    else:
        # Count lines so the vector matrix can be pre-allocated.
        with ensure_open(filepath) as fin:
            for i, line in enumerate(fin):
                pass
        vocab_size = i + 1
    wv.vectors = np.zeros((vocab_size, num_dim), REAL)
    # FIX: removed the dead batching machinery — the original initialized
    # `batch, words = [], []` but never appended to either, so the
    # `len(words) >= batch_size` reset and the trailing `if words:` flush
    # could never execute. Vectors are written directly into wv.vectors.
    with ensure_open(filepath) as fin:
        for i, line in enumerate(tqdm_prog(fin, total=vocab_size)):
            line = line.split()
            word = line[0]
            vector = np.array(line[1:]).astype(float)
            wv.index2word.append(word)
            wv.vocab[word] = Vocab(index=i, count=vocab_size - i)
            wv.vectors[i] = vector
            if i >= vocab_size - 1:
                break
    return wv
r Load a pretrained GloVE word vector model
37,422
def load_glove_df(filepath, **kwargs):
    """Load a GloVe-format text file into a DataFrame (words as index).

    Keyword arguments override the read_csv defaults used here.
    """
    options = dict(index_col=0, header=None, sep=r'\s',
                   skiprows=[0], verbose=False, engine='python')
    options.update(kwargs)
    return pd.read_csv(filepath, **options)
Load a GloVE - format text file into a dataframe
37,423
def get_en2fr(url='http://www.manythings.org/anki/fra-eng.zip'):
    """Download and parse the English->French translation dataset used in the
    Keras seq2seq example.
    """
    download_unzip(url)
    return pd.read_table(url, compression='zip', header=None,
                         skip_blank_lines=True, sep='\t', skiprows=0,
                         names=['en', 'fr'])
Download and parse English - > French translation dataset used in Keras seq2seq example
37,424
def load_anki_df(language='deu'):
    """Load a DataFrame of statements in one language paired with their
    English translations (Anki/manythings format).

    *language* may be a 3-letter language code or a direct path to the file.
    """
    if os.path.isfile(language):
        filepath = language
        lang = re.search('[a-z]{3}-eng/', filepath).group()[:3].lower()
    else:
        lang = (language or 'deu').lower()[:3]
        filepath = os.path.join(BIGDATA_PATH,
                                '{}-eng'.format(lang), '{}.txt'.format(lang))
    frame = pd.read_table(filepath, skiprows=1, header=None)
    frame.columns = ['eng', lang]
    return frame
Load into a DataFrame statements in one language along with their translation into English
37,425
def generate_big_urls_glove(bigurls=None):
    """Generate a dictionary of URLs for various combinations of GloVe
    training-set sizes (6B/42B/840B tokens) and dimensionality (50-300).

    Entries are derived from the canonical BIG_URLS glove entries.
    """
    bigurls = bigurls or {}
    size_suffixes = (
        'sm -sm _sm -small _small'.split(),
        'med -med _med -medium _medium'.split(),
        'lg -lg _lg -large _large'.split(),
    )
    for num_dim in (50, 100, 200, 300):
        for suffixes, num_words in zip(size_suffixes, (6, 42, 840)):
            # Last suffix in each group is the canonical BIG_URLS key.
            for suf in suffixes[:-1]:
                name = 'glove' + suf + str(num_dim)
                dirname = 'glove.{num_words}B'.format(num_words=num_words)
                filename = dirname + '.{num_dim}d.w2v.txt'.format(num_dim=num_dim)
                canonical = BIG_URLS['glove' + suffixes[-1]]
                entry = list(canonical[:2])
                entry.append(os.path.join(dirname, filename))
                entry.append(load_glove)
                bigurls[name] = tuple(entry)
    return bigurls
Generate a dictionary of URLs for various combinations of GloVe training set sizes and dimensionality
37,426
def normalize_ext_rename(filepath):
    """Normalize a file's extension (e.g. .tgz -> .tar.gz, 300d.txt ->
    300d.glove.txt) and rename the file on disk accordingly.

    Returns the renamed path.
    """
    logger.debug('normalize_ext.filepath=' + str(filepath))
    normalized = normalize_ext(filepath)
    logger.debug('download_unzip.new_filepaths=' + str(normalized))
    # rename_file performs the on-disk rename and returns the final path.
    filepath = rename_file(filepath, normalized)
    logger.debug('download_unzip.filepath=' + str(filepath))
    return filepath
normalize file ext like . tgz - > . tar . gz and 300d . txt - > 300d . glove . txt and rename the file
37,427
def untar(fname, verbose=True):
    """Unzip and untar a .tar.gz file into a subdir of BIGDATA_PATH and
    return that directory (None for non-.tar.gz input).
    """
    if not fname.lower().endswith(".tar.gz"):
        logger.warning("Not a tar.gz file: {}".format(fname))
        return None
    dirpath = os.path.join(BIGDATA_PATH, os.path.basename(fname)[:-7])
    if os.path.isdir(dirpath):
        return dirpath  # already extracted
    with tarfile.open(fname) as tf:
        members = tf.getmembers()
        # NOTE(review): tf.extract on untrusted archives can path-traverse;
        # confirm archives come from trusted sources.
        for member in tqdm(members, total=len(members)):
            tf.extract(member, path=BIGDATA_PATH)
    dirpath = os.path.join(BIGDATA_PATH, members[0].name)
    if os.path.isdir(dirpath):
        return dirpath
Unzip and untar a tar . gz file into a subdir of the BIGDATA_PATH directory
37,428
def endswith_strip(s, endswith='.txt', ignorecase=True):
    """Strip a suffix from the end of a string; return *s* unchanged when the
    suffix is absent.
    """
    if ignorecase:
        return s[:-len(endswith)] if s.lower().endswith(endswith.lower()) else s
    return s[:-len(endswith)] if s.endswith(endswith) else s
Strip a suffix from the end of a string
37,429
def startswith_strip(s, startswith='http://', ignorecase=True):
    """Strip a prefix from the beginning of a string; return *s* unchanged
    when the prefix is absent.
    """
    if ignorecase:
        if s.lower().startswith(startswith.lower()):
            return s[len(startswith):]
    else:
        # FIX: the case-sensitive branch tested s.endswith(startswith),
        # so it never stripped the prefix.
        if s.startswith(startswith):
            return s[len(startswith):]
    return s
Strip a prefix from the beginning of a string
37,430
def get_longest_table(url='https://www.openoffice.org/dev_docs/source/file_extensions.html', header=0):
    """Retrieve the HTML tables from a URL and return the longest DataFrame found."""
    tables = pd.read_html(url, header=header)
    return longest_table(tables)
Retrieve the HTML tables from a URL and return the longest DataFrame found
37,431
def get_filename_extensions(url='https://www.webopedia.com/quick_ref/fileextensionsfull.asp'):
    """Load a DataFrame of filename extensions from the indicated url, with
    the first columns renamed to ext / description / details.
    """
    df = get_longest_table(url)
    renamed = list(df.columns)
    renamed[0] = 'ext'
    renamed[1] = 'description'
    if len(renamed) > 2:
        renamed[2] = 'details'
    df.columns = renamed
    return df
Load a DataFrame of filename extensions from the indicated url
37,432
def create_big_url(name):
    """If *name* looks like an http URL, add an entry for it in BIG_URLS and
    return the generated key (None when metadata cannot be fetched).
    """
    global BIG_URLS
    filemeta = get_url_filemeta(name)
    if not filemeta:
        return None
    filename = filemeta['filename']
    remote_size = filemeta['remote_size']
    url = filemeta['url']
    # Derive a dictionary key from the filename stem.
    parts = filename.split('.')
    key = (parts[0] if parts[0] not in ('', '.') else parts[1]).replace(' ', '-')
    key = key.lower().strip()
    BIG_URLS[key] = (url, int(remote_size or -1), filename)
    return key
If name looks like a url with an http add an entry for it in BIG_URLS
37,433
# Load a dataset by name from BIG_URLS (downloading/unzipping as needed),
# from the named-CSV registries, or from a file path; raises IOError with the
# list of available dataset names when nothing matches.
# NOTE(review): flattened one-liner kept byte-for-byte — indentation (and so
# the exact nesting of the fallback chain) was lost in extraction.
# NOTE(review): contains two bare `except:` clauses (around ensure_open and
# read_csv) that silently swallow all errors, and `len(BIG_URLS[name]) >= 4`
# guarding an index of [3]/[4] — both worth confirming against the original
# nlpia.loaders source before restyling.
def get_data ( name = 'sms-spam' , nrows = None , limit = None ) : nrows = nrows or limit if name in BIG_URLS : logger . info ( 'Downloading {}' . format ( name ) ) filepaths = download_unzip ( name , normalize_filenames = True ) logger . debug ( 'nlpia.loaders.get_data.filepaths=' + str ( filepaths ) ) filepath = filepaths [ name ] [ 0 ] if isinstance ( filepaths [ name ] , ( list , tuple ) ) else filepaths [ name ] logger . debug ( 'nlpia.loaders.get_data.filepath=' + str ( filepath ) ) filepathlow = filepath . lower ( ) if len ( BIG_URLS [ name ] ) >= 4 : kwargs = BIG_URLS [ name ] [ 4 ] if len ( BIG_URLS [ name ] ) >= 5 else { } return BIG_URLS [ name ] [ 3 ] ( filepath , ** kwargs ) if filepathlow . endswith ( '.w2v.txt' ) : try : return KeyedVectors . load_word2vec_format ( filepath , binary = False , limit = nrows ) except ( TypeError , UnicodeError ) : pass if filepathlow . endswith ( '.w2v.bin' ) or filepathlow . endswith ( '.bin.gz' ) or filepathlow . endswith ( '.w2v.bin.gz' ) : try : return KeyedVectors . load_word2vec_format ( filepath , binary = True , limit = nrows ) except ( TypeError , UnicodeError ) : pass if filepathlow . endswith ( '.gz' ) : try : filepath = ensure_open ( filepath ) except : pass if re . match ( r'.json([.][a-z]{0,3}){0,2}' , filepathlow ) : return read_json ( filepath ) if filepathlow . endswith ( '.tsv.gz' ) or filepathlow . endswith ( '.tsv' ) : try : return pd . read_table ( filepath ) except : pass if filepathlow . endswith ( '.csv.gz' ) or filepathlow . endswith ( '.csv' ) : try : return read_csv ( filepath ) except : pass if filepathlow . endswith ( '.txt' ) : try : return read_txt ( filepath ) except ( TypeError , UnicodeError ) : pass return filepaths [ name ] elif name in DATASET_NAME2FILENAME : return read_named_csv ( name , nrows = nrows ) elif name in DATA_NAMES : return read_named_csv ( DATA_NAMES [ name ] , nrows = nrows ) elif os . path . isfile ( name ) : return read_named_csv ( name , nrows = nrows ) elif os . 
path . isfile ( os . path . join ( DATA_PATH , name ) ) : return read_named_csv ( os . path . join ( DATA_PATH , name ) , nrows = nrows ) msg = 'Unable to find dataset "{}"" in {} or {} (*.csv.gz, *.csv, *.json, *.zip, or *.txt)\n' . format ( name , DATA_PATH , BIGDATA_PATH ) msg += 'Available dataset names include:\n{}' . format ( '\n' . join ( DATASET_NAMES ) ) logger . error ( msg ) raise IOError ( msg )
Load data from a json csv or txt file if it exists in the data dir .
37,434
def get_wikidata_qnum(wikiarticle, wikisite):
    """Retrieve the Wikidata Q-number (entity id) for an article on a given
    wiki site.
    """
    query_params = {'action': 'wbgetentities', 'titles': wikiarticle,
                    'sites': wikisite, 'props': '', 'format': 'json'}
    resp = requests.get('https://www.wikidata.org/w/api.php',
                        timeout=5, params=query_params).json()
    # The single result's key is the Q-number itself.
    return list(resp['entities'])[0]
Retrieve the Query number for a wikidata database of metadata about a particular article
37,435
def normalize_column_names(df):
    r"""Clean up whitespace in column names. See better version at
    ``pugnlp.clean_columns``.

    Accepts a DataFrame (its ``.columns`` are used) or any iterable of names.
    """
    labels = df.columns if hasattr(df, 'columns') else df
    return [label.lower().replace(' ', '_') for label in labels]
r Clean up whitespace in column names . See better version at pugnlp . clean_columns
37,436
# Convert dollar-value strings (numbers with commas, '$' and '%' characters)
# into floating point values, column by column, keeping a converted column
# only when most of its content survives the conversion.
# NOTE(review): flattened one-liner kept byte-for-byte; indentation lost.
# NOTE(review): contains a bare `except:` that logs and re-raises — broad but
# not silent; the .2 / .6 thresholds are heuristics whose provenance is not
# visible here.
def clean_column_values ( df , inplace = True ) : r dollars_percents = re . compile ( r'[%$,;\s]+' ) if not inplace : df = df . copy ( ) for c in df . columns : values = None if df [ c ] . dtype . char in '<U S O' . split ( ) : try : values = df [ c ] . copy ( ) values = values . fillna ( '' ) values = values . astype ( str ) . str . replace ( dollars_percents , '' ) if values . str . len ( ) . sum ( ) > .2 * df [ c ] . astype ( str ) . str . len ( ) . sum ( ) : values [ values . isnull ( ) ] = np . nan values [ values == '' ] = np . nan values = values . astype ( float ) except ValueError : values = None except : logger . error ( 'Error on column {} with dtype {}' . format ( c , df [ c ] . dtype ) ) raise if values is not None : if values . isnull ( ) . sum ( ) < .6 * len ( values ) and values . any ( ) : df [ c ] = values return df
Convert dollar-value strings ( numbers with commas and percents ) into floating point values
37,437
def isglove(filepath):
    """Inspect the first lines of a file and return its vector dimensionality
    if it looks like a GloVe/word2vec vector file, else False.
    """
    with ensure_open(filepath, 'r') as f:
        header_line = f.readline()
        vector_line = f.readline()
    # A word2vec-style header ("<num_vectors> <num_dim>") settles it directly.
    try:
        num_vectors, num_dim = header_line.split()
        return int(num_dim)
    except (ValueError, TypeError):
        pass
    vector = vector_line.split()[1:]
    if len(vector) % 10:
        print(vector)
        print(len(vector) % 10)
        return False
    try:
        vector = np.array([float(x) for x in vector])
    except (ValueError, TypeError):
        return False
    if np.all(np.abs(vector) < 12.):
        return len(vector)
    return False
Get the first word vector in a GloVE file and return its dimensionality or False if not a vector
37,438
# Use the SpaCy parser to parse and tag natural-language strings, falling
# back to str.split when the language model cannot be loaded or downloaded.
# NOTE(review): flattened one-liner kept byte-for-byte; indentation lost.
# Relies on the module-global `_parse` cache and recurses on itself for
# str input; restyling without the original nesting risks changing the
# list-vs-generator return behavior, so the code is left untouched.
def nlp ( texts , lang = 'en' , linesep = None , verbose = True ) : r linesep = os . linesep if linesep in ( 'default' , True , 1 , 'os' ) else linesep tqdm_prog = no_tqdm if ( not verbose or ( hasattr ( texts , '__len__' ) and len ( texts ) < 3 ) ) else tqdm global _parse if not _parse : try : _parse = spacy . load ( lang ) except ( OSError , IOError ) : try : spacy . cli . download ( lang ) except URLError : logger . warning ( "Unable to download Spacy language model '{}' so nlp(text) just returns text.split()" . format ( lang ) ) parse = _parse or str . split if isinstance ( texts , str ) : if linesep : return nlp ( texts . split ( linesep ) ) else : return nlp ( [ texts ] ) if hasattr ( texts , '__len__' ) : if len ( texts ) == 1 : return parse ( texts [ 0 ] ) elif len ( texts ) > 1 : return [ ( parse or str . split ) ( text ) for text in tqdm_prog ( texts ) ] else : return None else : return ( parse ( text ) for text in tqdm_prog ( texts ) )
r Use the SpaCy parser to parse and tag natural language strings .
37,439
def get_decoder(libdir=None, modeldir=None, lang='en-us'):
    """Create a pocketsphinx Decoder configured with the requested language model."""
    modeldir = modeldir or (os.path.join(libdir, 'model') if libdir else MODELDIR)
    libdir = os.path.dirname(modeldir)
    cfg = ps.Decoder.default_config()
    # Acoustic model, language model, and pronunciation dictionary paths.
    cfg.set_string('-hmm', os.path.join(modeldir, lang))
    cfg.set_string('-lm', os.path.join(modeldir, lang + '.lm.bin'))
    cfg.set_string('-dict', os.path.join(modeldir, 'cmudict-' + lang + '.dict'))
    print(cfg)
    return ps.Decoder(cfg)
Create a decoder with the requested language model
37,440
def transcribe(decoder, audio_file, libdir=None):
    """Decode streaming audio data from a raw binary file on disk.

    Args:
        decoder: a pocketsphinx decoder; a default one is created when falsy.
        audio_file: path to the raw audio file.
        libdir: unused, kept for backward compatibility.
    """
    # FIX: the original unconditionally overwrote the passed-in decoder with
    # get_decoder(), ignoring the parameter entirely.
    decoder = decoder or get_decoder()
    decoder.start_utt()
    # FIX: the file handle was never closed; use a context manager.
    with open(audio_file, 'rb') as stream:
        while True:
            buf = stream.read(1024)
            if not buf:
                break
            decoder.process_raw(buf, False, False)
    decoder.end_utt()
    return evaluate_results(decoder)
Decode streaming audio data from raw binary file on disk .
37,441
def pre_process_data(filepath):
    """Load labeled review texts from pos/ and neg/ subdirectories.

    This is dependent on your training data source, generalized as best as
    possible. Returns a shuffled list of (label, text) tuples with pos=1, neg=0.
    """
    dataset = []
    for subdir, label in (('pos', 1), ('neg', 0)):
        pattern = os.path.join(filepath, subdir, '*.txt')
        for filename in glob.glob(pattern):
            with open(filename, 'r') as f:
                dataset.append((label, f.read()))
    shuffle(dataset)
    return dataset
This is dependent on your training data source but we will try to generalize it as best as possible .
37,442
def pad_trunc(data, maxlen):
    """For a given dataset, pad each sample with zero vectors or truncate it
    to *maxlen* token vectors.

    FIX: the original (a) appended one shared zero-vector list, so all padding
    rows aliased a single object, and (b) padded via ``temp = sample`` followed
    by ``temp.append(...)``, mutating the caller's input samples in place.
    This version builds fresh lists and leaves the input untouched.
    """
    if not data:
        return []
    vector_dim = len(data[0][0])
    new_data = []
    for sample in data:
        if len(sample) >= maxlen:
            padded = list(sample[:maxlen])
        else:
            shortfall = maxlen - len(sample)
            padded = list(sample) + [[0.0] * vector_dim for _ in range(shortfall)]
        new_data.append(padded)
    return new_data
For a given dataset pad with zero vectors or truncate to maxlen
37,443
def clean_data(data):
    """Shift each sample's text to lower case, replace characters outside the
    valid set with 'UNK', and listify the characters.
    """
    VALID = 'abcdefghijklmnopqrstuvwxyz123456789"\'?!.,:; '
    # NOTE(review): VALID omits '0', so zero digits map to 'UNK' — confirm intended.
    cleaned = []
    for sample in data:
        cleaned.append([ch if ch in VALID else 'UNK'
                        for ch in sample[1].lower()])
    return cleaned
Shift to lower case replace unknowns with UNK and listify
37,444
def char_pad_trunc(data, maxlen):
    """Truncate each sample to *maxlen* tokens or pad it with 'PAD' tokens."""
    padded_set = []
    for sample in data:
        shortfall = maxlen - len(sample)
        if shortfall < 0:
            padded_set.append(sample[:maxlen])
        elif shortfall > 0:
            padded_set.append(sample + ['PAD'] * shortfall)
        else:
            padded_set.append(sample)
    return padded_set
We truncate to maxlen or add in PAD tokens
37,445
def create_dicts(data):
    """Build char->index and index->char lookup dicts over all tokens in data.
    (Modified from the Keras LSTM example.)
    """
    vocab = set()
    for sample in data:
        vocab.update(sample)
    char_indices = {c: i for i, c in enumerate(vocab)}
    indices_char = {i: c for i, c in enumerate(vocab)}
    return char_indices, indices_char
Modified from Keras LSTM example
37,446
def onehot_encode(dataset, char_indices, maxlen):
    """One-hot encode token sequences into an array of shape
    (num_samples, maxlen, vocab_size).
    """
    vocab_size = len(char_indices)
    encoded = np.zeros((len(dataset), maxlen, vocab_size))
    for sample_num, sentence in enumerate(dataset):
        for position, char in enumerate(sentence):
            encoded[sample_num, position, char_indices[char]] = 1
    return encoded
One hot encode the tokens
37,447
# Fit a PCA-style model by computing the full SVD of X (adapted from
# sklearn internals: svd_flip, _infer_dimension_, stable_cumsum).
# NOTE(review): flattened one-liner kept byte-for-byte; indentation lost.
# NOTE(review): the signature `self=self, X=X` captures whatever `self` and
# `X` happen to be at definition time — clearly notebook/demo code, and the
# interleaved print() calls confirm that; left untouched rather than guessed at.
def _fit_full ( self = self , X = X , n_components = 6 ) : n_samples , n_features = X . shape self . mean_ = np . mean ( X , axis = 0 ) print ( self . mean_ ) X -= self . mean_ print ( X . round ( 2 ) ) U , S , V = linalg . svd ( X , full_matrices = False ) print ( V . round ( 2 ) ) U , V = svd_flip ( U , V ) components_ = V print ( components_ . round ( 2 ) ) explained_variance_ = ( S ** 2 ) / ( n_samples - 1 ) total_var = explained_variance_ . sum ( ) explained_variance_ratio_ = explained_variance_ / total_var singular_values_ = S . copy ( ) if n_components == 'mle' : n_components = _infer_dimension_ ( explained_variance_ , n_samples , n_features ) elif 0 < n_components < 1.0 : ratio_cumsum = stable_cumsum ( explained_variance_ratio_ ) n_components = np . searchsorted ( ratio_cumsum , n_components ) + 1 if n_components < min ( n_features , n_samples ) : self . noise_variance_ = explained_variance_ [ n_components : ] . mean ( ) else : self . noise_variance_ = 0. self . n_samples_ , self . n_features_ = n_samples , n_features self . components_ = components_ [ : n_components ] print ( self . components_ . round ( 2 ) ) self . n_components_ = n_components self . explained_variance_ = explained_variance_ [ : n_components ] self . explained_variance_ratio_ = explained_variance_ratio_ [ : n_components ] self . singular_values_ = singular_values_ [ : n_components ] return U , S , V
Fit the model by computing full SVD on X
37,448
def extract_aiml(path='aiml-en-us-foundation-alice.v1-9'):
    """Extract an AIML .zip file (if it hasn't been already) and return the
    list of AIML file paths.
    """
    path = find_data_path(path) or path
    if os.path.isdir(path):
        # Already extracted: just list the directory contents.
        return [os.path.join(path, p) for p in os.listdir(path)]
    zf = zipfile.ZipFile(path)
    paths = []
    for name in zf.namelist():
        if '.hg/' in name:
            continue  # skip Mercurial repository internals
        paths.append(zf.extract(name, path=BIGDATA_PATH))
    return paths
Extract an aiml . zip file if it hasn t been already and return a list of aiml file paths
37,449
def create_brain(path='aiml-en-us-foundation-alice.v1-9.zip'):
    """Create an aiml_bot.Bot brain from an AIML zip file or a directory of
    AIML files, logging parse failures and the number of templates loaded.
    """
    path = find_data_path(path) or path
    bot = Bot()
    num_templates = bot._brain.template_count
    paths = extract_aiml(path=path)
    for aiml_path in paths:
        if not aiml_path.lower().endswith('.aiml'):
            continue
        try:
            bot.learn(aiml_path)
        except AimlParserError:
            logger.error(format_exc())
            logger.warning('AIML Parse Error: {}'.format(aiml_path))
    num_templates = bot._brain.template_count - num_templates
    logger.info('Loaded {} trigger-response pairs.\n'.format(num_templates))
    print('Loaded {} trigger-response pairs from {} AIML files.'.format(
        bot._brain.template_count, len(paths)))
    return bot
Create an aiml_bot . Bot brain from an AIML zip file or directory of AIML files
37,450
def minify_urls(filepath, ext='asc', url_regex=None, output_ext='.urls_minified', access_token=None):
    """Use bit.ly to shrink all URLs in text files within a folder structure.

    Writes each altered file alongside the original with *output_ext* appended
    and returns the last file's altered text.
    """
    access_token = access_token or secrets.bitly.access_token
    output_ext = output_ext or ''
    url_regex = regex.compile(url_regex) if isinstance(url_regex, str) else url_regex
    filemetas = []
    for filemeta in find_files(filepath, ext=ext):
        filemetas += [filemeta]
        altered_text = ''
        with open(filemeta['path'], 'rt') as fin:
            text = fin.read()
        end = 0
        for match in url_regex.finditer(text):
            url = match.group()
            start = match.start()
            # FIX: was `altered_text += text[:start]`, which re-appended the
            # entire prefix of the file for every match after the first.
            altered_text += text[end:start]
            resp = requests.get(
                'https://api-ssl.bitly.com/v3/shorten?access_token={}&longUrl={}'.format(access_token, url),
                allow_redirects=True, timeout=5)
            js = resp.json()
            short_url = js['shortUrl']
            altered_text += short_url
            end = start + len(url)
        altered_text += text[end:]
        with open(filemeta['path'] + (output_ext or ''), 'wt') as fout:
            fout.write(altered_text)
    return altered_text
Use bitly or similar minifier to shrink all URLs in text files within a folder structure .
37,451
def delimit_slug(slug, sep=' '):
    """Return a string of separated tokens found within a slug:
    slugLike_This => slug Like This.
    """
    return re.sub(CRE_SLUG_DELIMITTER, sep, slug)
Return a str of separated tokens found within a slugLike_This = > slug Like This
37,452
def clean_asciidoc ( text ) : r text = re . sub ( r'(\b|^)[\[_*]{1,2}([a-zA-Z0-9])' , r'"\2' , text ) text = re . sub ( r'([a-zA-Z0-9])[\]_*]{1,2}' , r'\1"' , text ) return text
r Transform asciidoc text into ASCII text that NL parsers can handle
37,453
def split_sentences_regex ( text ) : parts = regex . split ( r'([a-zA-Z0-9][.?!])[\s$]' , text ) sentences = [ '' . join ( s ) for s in zip ( parts [ 0 : : 2 ] , parts [ 1 : : 2 ] ) ] return sentences + [ parts [ - 1 ] ] if len ( parts ) % 2 else sentences
Use dead - simple regex to split text into sentences . Very poor accuracy .
37,454
def split_sentences_spacy ( text , language_model = 'en' ) : r doc = nlp ( text ) sentences = [ ] if not hasattr ( doc , 'sents' ) : logger . warning ( "Using NLTK sentence tokenizer because SpaCy language model hasn't been loaded" ) return split_sentences_nltk ( text ) for w , span in enumerate ( doc . sents ) : sent = '' . join ( doc [ i ] . string for i in range ( span . start , span . end ) ) . strip ( ) if len ( sent ) : sentences . append ( sent ) return sentences
r You must download a spacy language model with python - m download en
37,455
def segment_sentences ( path = os . path . join ( DATA_PATH , 'book' ) , splitter = split_sentences_nltk , ** find_files_kwargs ) : sentences = [ ] if os . path . isdir ( path ) : for filemeta in find_files ( path , ** find_files_kwargs ) : with open ( filemeta [ 'path' ] ) as fin : i , batch = 0 , [ ] try : for i , line in enumerate ( fin ) : if not line . strip ( ) : sentences . extend ( splitter ( '\n' . join ( batch ) ) ) batch = [ line ] else : batch . append ( line ) except ( UnicodeDecodeError , IOError ) : logger . error ( 'UnicodeDecodeError or IOError on line {} in file {} from stat: {}' . format ( i + 1 , fin . name , filemeta ) ) raise if len ( batch ) : sentences . extend ( splitter ( '\n' . join ( batch ) ) ) else : batch = [ ] for i , line in enumerate ( iter_lines ( path ) ) : if not line . strip ( ) : sentences . extend ( splitter ( '\n' . join ( batch ) ) ) batch = [ line ] else : batch . append ( line ) if len ( batch ) : sentences . extend ( splitter ( '\n' . join ( batch ) ) ) return sentences
Return a list of all sentences and empty lines .
37,456
def fix_hunspell_json ( badjson_path = 'en_us.json' , goodjson_path = 'en_us_fixed.json' ) : with open ( badjson_path , 'r' ) as fin : with open ( goodjson_path , 'w' ) as fout : for i , line in enumerate ( fin ) : line2 = regex . sub ( r'\[(\w)' , r'["\1' , line ) line2 = regex . sub ( r'(\w)\]' , r'\1"]' , line2 ) line2 = regex . sub ( r'(\w),(\w)' , r'\1","\2' , line2 ) fout . write ( line2 ) with open ( goodjson_path , 'r' ) as fin : words = [ ] with open ( goodjson_path + '.txt' , 'w' ) as fout : hunspell = json . load ( fin ) for word , affixes in hunspell [ 'words' ] . items ( ) : words += [ word ] fout . write ( word + '\n' ) for affix in affixes : words += [ affix ] fout . write ( affix + '\n' ) return words
Fix the invalid hunspellToJSON . py json format by inserting double - quotes in list of affix strings
37,457
def format_ubuntu_dialog ( df ) : s = '' for i , record in df . iterrows ( ) : statement = list ( split_turns ( record . Context ) ) [ - 1 ] reply = list ( split_turns ( record . Utterance ) ) [ - 1 ] s += 'Statement: {}\n' . format ( statement ) s += 'Reply: {}\n\n' . format ( reply ) return s
Print statements paired with replies formatted for easy review
37,458
def splitext ( filepath ) : exts = getattr ( CRE_FILENAME_EXT . search ( filepath ) , 'group' , str ) ( ) return ( filepath [ : ( - len ( exts ) or None ) ] , exts )
Like os . path . splitext except splits compound extensions as one long one
37,459
def offline_plotly_scatter3d ( df , x = 0 , y = 1 , z = - 1 ) : data = [ ] colors = [ 'rgb(228,26,28)' , 'rgb(55,126,184)' , 'rgb(77,175,74)' ] x = get_array ( df , x , default = 0 ) y = get_array ( df , y , default = 1 ) z = get_array ( df , z , default = - 1 ) for i in range ( len ( df [ 'name' ] . unique ( ) ) ) : name = df [ 'Name' ] . unique ( ) [ i ] color = colors [ i ] x = x [ pd . np . array ( df [ 'name' ] == name ) ] y = y [ pd . np . array ( df [ 'name' ] == name ) ] z = z [ pd . np . array ( df [ 'name' ] == name ) ] trace = dict ( name = name , x = x , y = y , z = z , type = "scatter3d" , mode = 'markers' , marker = dict ( size = 3 , color = color , line = dict ( width = 0 ) ) ) data . append ( trace ) layout = dict ( width = 800 , height = 550 , autosize = False , title = 'Iris dataset' , scene = dict ( xaxis = dict ( gridcolor = 'rgb(255, 255, 255)' , zerolinecolor = 'rgb(255, 255, 255)' , showbackground = True , backgroundcolor = 'rgb(230, 230,230)' ) , yaxis = dict ( gridcolor = 'rgb(255, 255, 255)' , zerolinecolor = 'rgb(255, 255, 255)' , showbackground = True , backgroundcolor = 'rgb(230, 230,230)' ) , zaxis = dict ( gridcolor = 'rgb(255, 255, 255)' , zerolinecolor = 'rgb(255, 255, 255)' , showbackground = True , backgroundcolor = 'rgb(230, 230,230)' ) , aspectratio = dict ( x = 1 , y = 1 , z = 0.7 ) , aspectmode = 'manual' ) , ) fig = dict ( data = data , layout = layout ) url = plotly . offline . plot ( fig , filename = 'pandas-3d-iris' , validate = False ) return url
Plot an offline scatter plot colored according to the categories in the name column .
37,460
def offline_plotly_data ( data , filename = None , config = None , validate = True , default_width = '100%' , default_height = 525 , global_requirejs = False ) : r config_default = dict ( DEFAULT_PLOTLY_CONFIG ) if config is not None : config_default . update ( config ) with open ( os . path . join ( DATA_PATH , 'plotly.js.min' ) , 'rt' ) as f : js = f . read ( ) html , divid , width , height = _plot_html ( data , config = config_default , validate = validate , default_width = default_width , default_height = default_height , global_requirejs = global_requirejs ) html = PLOTLY_HTML . format ( plotlyjs = js , plotlyhtml = html ) if filename and isinstance ( filename , str ) : with open ( filename , 'wt' ) as f : f . write ( html ) return html
r Write a plotly scatter plot to HTML file that doesn t require server
37,461
def normalize_etpinard_df ( df = 'https://plot.ly/~etpinard/191.csv' , columns = 'x y size text' . split ( ) , category_col = 'category' , possible_categories = [ 'Africa' , 'Americas' , 'Asia' , 'Europe' , 'Oceania' ] ) : possible_categories = [ 'Africa' , 'Americas' , 'Asia' , 'Europe' , 'Oceania' ] if possible_categories is None else possible_categories df . columns = clean_columns ( df . columns ) df = pd . read_csv ( df ) if isinstance ( df , str ) else df columns = clean_columns ( list ( columns ) ) df2 = pd . DataFrame ( columns = columns ) df2 [ category_col ] = np . concatenate ( [ np . array ( [ categ ] * len ( df ) ) for categ in possible_categories ] ) columns = zip ( columns , [ [ clean_columns ( categ + ', ' + column ) for categ in possible_categories ] for column in columns ] ) for col , category_cols in columns : df2 [ col ] = np . concatenate ( [ df [ label ] . values for label in category_cols ] ) return df2
Reformat a dataframe in etpinard s format for use in plot functions and sklearn models
37,462
def offline_plotly_scatter_bubble ( df , x = 'x' , y = 'y' , size_col = 'size' , text_col = 'text' , category_col = 'category' , possible_categories = None , filename = None , config = { 'displaylogo' : False } , xscale = None , yscale = 'log' , layout = { 'hovermode' : 'closest' , 'showlegend' : False , 'autosize' : True } , marker = { 'sizemode' : 'area' } , min_size = 10 , ) : r config_default = dict ( DEFAULT_PLOTLY_CONFIG ) marker_default = { 'size' : size_col or min_size , 'sizemode' : 'area' , 'sizeref' : int ( df [ size_col ] . min ( ) * .8 ) if size_col else min_size } marker_default . update ( marker ) size_col = marker_default . pop ( 'size' ) layout_default = { 'xaxis' : XAxis ( title = x , type = xscale ) , 'yaxis' : YAxis ( title = y , type = yscale ) , } layout_default . update ( ** layout ) if config is not None : config_default . update ( config ) df . columns = clean_columns ( df . columns ) if possible_categories is None and category_col is not None : if category_col in df . columns : category_labels = df [ category_col ] else : category_labels = np . array ( category_col ) possible_categories = list ( set ( category_labels ) ) possible_categories = [ None ] if possible_categories is None else possible_categories if category_col and category_col in df : masks = [ np . array ( df [ category_col ] == label ) for label in possible_categories ] else : masks = [ np . array ( [ True ] * len ( df ) ) ] * len ( possible_categories ) data = { 'data' : [ Scatter ( x = df [ x ] [ mask ] . values , y = df [ y ] [ mask ] . values , text = df [ text_col ] [ mask ] . values , marker = Marker ( size = df [ size_col ] [ mask ] if size_col in df . columns else size_col , ** marker_default ) , mode = 'markers' , name = str ( category_name ) ) for ( category_name , mask ) in zip ( possible_categories , masks ) ] , 'layout' : Layout ( ** layout_default ) } return offline_plotly_data ( data , filename = filename , config = config_default )
r Interactive scatterplot of a DataFrame with the size and color of circles linke to two columns
37,463
def format_hex ( i , num_bytes = 4 , prefix = '0x' ) : prefix = str ( prefix or '' ) i = int ( i or 0 ) return prefix + '{0:0{1}x}' . format ( i , num_bytes )
Format hexidecimal string from decimal integer value
37,464
def is_up_url ( url , allow_redirects = False , timeout = 5 ) : r if not isinstance ( url , basestring ) or '.' not in url : return False normalized_url = prepend_http ( url ) session = requests . Session ( ) session . mount ( url , HTTPAdapter ( max_retries = 2 ) ) try : resp = session . get ( normalized_url , allow_redirects = allow_redirects , timeout = timeout ) except ConnectionError : return None except : return None if resp . status_code in ( 301 , 302 , 307 ) or resp . headers . get ( 'location' , None ) : return resp . headers . get ( 'location' , None ) elif 100 <= resp . status_code < 400 : return normalized_url else : return False
r Check URL to see if it is a valid web page return the redirected location if it is
37,465
def get_markdown_levels ( lines , levels = set ( ( 0 , 1 , 2 , 3 , 4 , 5 , 6 ) ) ) : r if isinstance ( levels , ( int , float , basestring , str , bytes ) ) : levels = [ float ( levels ) ] levels = set ( [ int ( i ) for i in levels ] ) if isinstance ( lines , basestring ) : lines = lines . splitlines ( ) level_lines = [ ] for line in lines : level_line = None if 0 in levels : level_line = ( 0 , line ) lstripped = line . lstrip ( ) for i in range ( 6 , 1 , - 1 ) : if lstripped . startswith ( '#' * i ) : level_line = ( i , lstripped [ i : ] . lstrip ( ) ) break if level_line and level_line [ 0 ] in levels : level_lines . append ( level_line ) return level_lines
r Return a list of 2 - tuples with a level integer for the heading levels
37,466
def iter_lines ( url_or_text , ext = None , mode = 'rt' ) : r if url_or_text is None or not url_or_text : return [ ] elif isinstance ( url_or_text , ( str , bytes , basestring ) ) : if '\n' in url_or_text or '\r' in url_or_text : return StringIO ( url_or_text ) elif os . path . isfile ( os . path . join ( DATA_PATH , url_or_text ) ) : return open ( os . path . join ( DATA_PATH , url_or_text ) , mode = mode ) elif os . path . isfile ( url_or_text ) : return open ( os . path . join ( url_or_text ) , mode = mode ) if os . path . isdir ( url_or_text ) : filepaths = [ filemeta [ 'path' ] for filemeta in find_files ( url_or_text , ext = ext ) ] return itertools . chain . from_iterable ( map ( open , filepaths ) ) url = looks_like_url ( url_or_text ) if url : for i in range ( 3 ) : return requests . get ( url , stream = True , allow_redirects = True , timeout = 5 ) else : return StringIO ( url_or_text ) elif isinstance ( url_or_text , ( list , tuple ) ) : text = '' for s in url_or_text : text += '\n' . join ( list ( iter_lines ( s , ext = ext , mode = mode ) ) ) + '\n' return iter_lines ( text )
r Return an iterator over the lines of a file or URI response .
37,467
def parse_utf_html ( url = os . path . join ( DATA_PATH , 'utf8_table.html' ) ) : utf = pd . read_html ( url ) utf = [ df for df in utf if len ( df ) > 1023 and len ( df . columns ) > 2 ] [ 0 ] utf = utf . iloc [ : 1024 ] if len ( utf ) == 1025 else utf utf . columns = 'char name hex' . split ( ) utf . name = utf . name . str . replace ( '<control>' , 'CONTTROL CHARACTER' ) multiascii = [ ' ' ] * len ( utf ) asc = [ ' ' ] * len ( utf ) rows = [ ] for i , name in enumerate ( utf . name ) : if i < 128 and str . isprintable ( chr ( i ) ) : asc [ i ] = chr ( i ) else : asc [ i ] = ' ' big = re . findall ( r'CAPITAL\ LETTER\ ([a-z0-9A-Z ]+$)' , name ) small = re . findall ( r'SMALL\ LETTER\ ([a-z0-9A-Z ]+$)' , name ) pattern = r'(?P<description>' r'(?P<lang>LATIN|GREEK|COPTIC|CYRILLIC)?[\s]*' r'(?P<case>CAPITAL|SMALL)?[\s]*' r'(?P<length>CHARACTER|LETTER)?[\s]*' r'(?P<ukrainian>BYELORUSSIAN-UKRAINIAN)?[\s]*' r'(?P<name>[-_><a-z0-9A-Z\s ]+)[\s]*' r'\(?(?P<code_point>U\+[- a-fA-F0-9]{4,8})?\)?)[\s]*' match = re . match ( pattern , name ) gd = match . groupdict ( ) gd [ 'char' ] = chr ( i ) gd [ 'suffix' ] = None gd [ 'wordwith' ] = None withprefix = re . match ( r'(?P<prefix>DOTLESS|TURNED|SMALL)(?P<name>.*)' + r'(?P<wordwith>WITH|SUPERSCRIPT|SUBSCRIPT|DIGRAPH)\s+(?P<suffix>[-_><a-z0-9A-Z\s ]+)' , gd [ 'name' ] ) if withprefix : gd . update ( withprefix . groupdict ( ) ) withsuffix = re . match ( r'(?P<name>.*)(?P<wordwith>WITH|SUPERSCRIPT|SUBSCRIPT|DIGRAPH)\s+' + r'(?P<suffix>[-_><a-z0-9A-Z\s ]+)' , gd [ 'name' ] ) if withsuffix : gd . update ( withsuffix . groupdict ( ) ) gd [ 'code_point' ] = gd [ 'code_point' ] or format_hex ( i , num_bytes = 4 , prefix = 'U+' ) . upper ( ) if i < 128 : gd [ 'ascii' ] = chr ( i ) else : multiascii = gd [ 'name' ] if gd [ 'suffix' ] and gd [ 'wordwith' ] : multiascii = NAME_ACCENT . 
get ( gd [ 'suffix' ] , "'" ) else : if big : m = big [ 0 ] multiascii [ i ] = m if len ( m ) == 1 : asc [ i ] = m elif small : multiascii [ i ] = small [ 0 ] . lower ( ) if len ( multiascii [ i ] ) == 1 : asc [ i ] = small [ 0 ] . lower ( ) rows . append ( gd ) df = pd . DataFrame ( rows ) df . multiascii = df . multiascii . str . strip ( ) df [ 'ascii' ] = df [ 'ascii' ] . str . strip ( ) df . name = df . name . str . strip ( ) return df
Parse HTML table UTF8 char descriptions returning DataFrame with ascii and mutliascii
37,468
def clean_csvs ( dialogpath = None ) : dialogdir = os . dirname ( dialogpath ) if os . path . isfile ( dialogpath ) else dialogpath filenames = [ dialogpath . split ( os . path . sep ) [ - 1 ] ] if os . path . isfile ( dialogpath ) else os . listdir ( dialogpath ) for filename in filenames : filepath = os . path . join ( dialogdir , filename ) df = clean_df ( filepath ) df . to_csv ( filepath , header = None ) return filenames
Translate non - ASCII characters to spaces or equivalent ASCII characters
37,469
def unicode2ascii ( text , expand = True ) : r translate = UTF8_TO_ASCII if not expand else UTF8_TO_MULTIASCII output = '' for c in text : if not c or ord ( c ) < 128 : output += c else : output += translate [ c ] if c in translate else ' ' return output . strip ( )
r Translate UTF8 characters to ASCII
37,470
def clean_df ( df , header = None , ** read_csv_kwargs ) : df = read_csv ( df , header = header , ** read_csv_kwargs ) df = df . fillna ( ' ' ) for col in df . columns : df [ col ] = df [ col ] . apply ( unicode2ascii ) return df
Convert UTF8 characters in a CSV file or dataframe into ASCII
37,471
def get_acronyms ( manuscript = os . path . expanduser ( '~/code/nlpia/lane/manuscript' ) ) : acronyms = [ ] for f , lines in get_lines ( manuscript ) : for line in lines : matches = CRE_ACRONYM . finditer ( line ) if matches : for m in matches : if m . group ( 'a2' ) : acronyms . append ( ( m . group ( 'a2' ) , m . group ( 's2' ) ) ) elif m . group ( 'a3' ) : acronyms . append ( ( m . group ( 'a3' ) , m . group ( 's3' ) ) ) elif m . group ( 'a4' ) : acronyms . append ( ( m . group ( 'a4' ) , m . group ( 's4' ) ) ) elif m . group ( 'a5' ) : acronyms . append ( ( m . group ( 'a5' ) , m . group ( 's5' ) ) ) return sorted ( dict ( acronyms ) . items ( ) )
Find all the 2 and 3 - letter acronyms in the manuscript and return as a sorted list of tuples
37,472
def write_glossary ( manuscript = os . path . expanduser ( '~/code/nlpia/lane/manuscript' ) , linesep = None ) : linesep = linesep or os . linesep lines = [ '[acronyms]' , '== Acronyms' , '' , '[acronyms,template="glossary",id="terms"]' ] acronyms = get_acronyms ( manuscript ) for a in acronyms : lines . append ( '*{}*:: {} -- ' . format ( a [ 0 ] , a [ 1 ] [ 0 ] . upper ( ) + a [ 1 ] [ 1 : ] ) ) return linesep . join ( lines )
Compose an asciidoc string with acronyms culled from the manuscript
37,473
def infer_url_title ( url ) : meta = get_url_filemeta ( url ) title = '' if meta : if meta . get ( 'hostname' , url ) == 'drive.google.com' : title = get_url_title ( url ) else : title = meta . get ( 'filename' , meta [ 'hostname' ] ) or meta [ 'hostname' ] title , fileext = splitext ( title ) else : logging . error ( 'Unable to retrieve URL: {}' . format ( url ) ) return None return delimit_slug ( title , ' ' )
Guess what the page title is going to be from the path and FQDN in the URL
37,474
def translate_book ( translators = ( HyperlinkStyleCorrector ( ) . translate , translate_line_footnotes ) , book_dir = BOOK_PATH , dest = None , include_tags = None , ext = '.nlpiabak' , skip_untitled = True ) : if callable ( translators ) or not hasattr ( translators , '__len__' ) : translators = ( translators , ) sections = get_tagged_sections ( book_dir = book_dir , include_tags = include_tags ) file_line_maps = [ ] for fileid , ( filepath , tagged_lines ) in enumerate ( sections ) : logger . info ( 'filepath={}' . format ( filepath ) ) destpath = filepath if not dest : copyfile ( filepath , filepath + '.' + ext . lstrip ( '.' ) ) elif os . path . sep in dest : destpath = os . path . join ( dest , os . path . basename ( filepath ) ) else : destpath = os . path . join ( os . path . dirname ( filepath ) , dest , os . path . basename ( filepath ) ) ensure_dir_exists ( os . path . dirname ( destpath ) ) with open ( destpath , 'w' ) as fout : logger . info ( 'destpath={}' . format ( destpath ) ) for lineno , ( tag , line ) in enumerate ( tagged_lines ) : if ( include_tags is None or tag in include_tags or any ( ( tag . startswith ( t ) for t in include_tags ) ) ) : for translate in translators : new_line = translate ( line ) if line != new_line : file_line_maps . append ( ( fileid , lineno , filepath , destpath , line , new_line ) ) line = new_line fout . write ( line ) return file_line_maps
Fix any style corrections listed in translate list of translation functions
37,475
def filter_lines ( input_file , output_file , translate = lambda line : line ) : filepath , lines = get_lines ( [ input_file ] ) [ 0 ] return filepath , [ ( tag , translate ( line = line , tag = tag ) ) for ( tag , line ) in lines ]
Translate all the lines of a single file
37,476
def filter_tagged_lines ( tagged_lines , include_tags = None , exclude_tags = None ) : r include_tags = ( include_tags , ) if isinstance ( include_tags , str ) else include_tags exclude_tags = ( exclude_tags , ) if isinstance ( exclude_tags , str ) else exclude_tags for tagged_line in tagged_lines : if ( include_tags is None or tagged_line [ 0 ] in include_tags or any ( ( tagged_line [ 0 ] . startswith ( t ) for t in include_tags ) ) ) : if exclude_tags is None or not any ( ( tagged_line [ 0 ] . startswith ( t ) for t in exclude_tags ) ) : yield tagged_line else : logger . debug ( 'skipping tag {} because it starts with one of the exclude_tags={}' . format ( tagged_line [ 0 ] , exclude_tags ) ) else : logger . debug ( 'skipping tag {} because not in {}' . format ( tagged_line [ 0 ] , include_tags ) )
r Return iterable of tagged lines where the tags all start with one of the include_tags prefixes
37,477
def accuracy_study ( tdm = None , u = None , s = None , vt = None , verbosity = 0 , ** kwargs ) : smat = np . zeros ( ( len ( u ) , len ( vt ) ) ) np . fill_diagonal ( smat , s ) smat = pd . DataFrame ( smat , columns = vt . index , index = u . index ) if verbosity : print ( ) print ( 'Sigma:' ) print ( smat . round ( 2 ) ) print ( ) print ( 'Sigma without zeroing any dim:' ) print ( np . diag ( smat . round ( 2 ) ) ) tdm_prime = u . values . dot ( smat . values ) . dot ( vt . values ) if verbosity : print ( ) print ( 'Reconstructed Term-Document Matrix' ) print ( tdm_prime . round ( 2 ) ) err = [ np . sqrt ( ( ( tdm_prime - tdm ) . values . flatten ( ) ** 2 ) . sum ( ) / np . product ( tdm . shape ) ) ] if verbosity : print ( ) print ( 'Error without reducing dimensions:' ) print ( err [ - 1 ] ) smat2 = smat . copy ( ) for numdim in range ( len ( s ) - 1 , 0 , - 1 ) : smat2 . iloc [ numdim , numdim ] = 0 if verbosity : print ( 'Sigma after zeroing out dim {}' . format ( numdim ) ) print ( np . diag ( smat2 . round ( 2 ) ) ) tdm_prime2 = u . values . dot ( smat2 . values ) . dot ( vt . values ) err += [ np . sqrt ( ( ( tdm_prime2 - tdm ) . values . flatten ( ) ** 2 ) . sum ( ) / np . product ( tdm . shape ) ) ] if verbosity : print ( 'Error after zeroing out dim {}' . format ( numdim ) ) print ( err [ - 1 ] ) return err
Reconstruct the term - document matrix and measure error as SVD terms are truncated
37,478
def get_anki_phrases ( lang = 'english' , limit = None ) : lang = lang . strip ( ) . lower ( ) [ : 3 ] lang = LANG2ANKI [ lang [ : 2 ] ] if lang not in ANKI_LANGUAGES else lang if lang [ : 2 ] == 'en' : return get_anki_phrases_english ( limit = limit ) return sorted ( get_data ( lang ) . iloc [ : , - 1 ] . str . strip ( ) . values )
Retrieve as many anki paired - statement corpora as you can for the requested language
37,479
def get_anki_phrases_english ( limit = None ) : texts = set ( ) for lang in ANKI_LANGUAGES : df = get_data ( lang ) phrases = df . eng . str . strip ( ) . values texts = texts . union ( set ( phrases ) ) if limit and len ( texts ) >= limit : break return sorted ( texts )
Return all the English phrases in the Anki translation flashcards
37,480
def get_vocab ( docs ) : if isinstance ( docs , spacy . tokens . doc . Doc ) : return get_vocab ( [ docs ] ) vocab = set ( ) for doc in tqdm ( docs ) : for tok in doc : vocab . add ( ( tok . text , tok . pos_ , tok . tag_ , tok . dep_ , tok . ent_type_ , tok . ent_iob_ , tok . sentiment ) ) return pd . DataFrame ( sorted ( vocab ) , columns = 'word pos tag dep ent_type ent_iob sentiment' . split ( ) )
Build a DataFrame containing all the words in the docs provided along with their POS tags etc
37,481
def get_word_vectors ( vocab ) : wv = get_data ( 'word2vec' ) vectors = np . array ( len ( vocab ) , len ( wv [ 'the' ] ) ) for i , tok in enumerate ( vocab ) : word = tok [ 0 ] variations = ( word , word . lower ( ) , word . lower ( ) [ : - 1 ] ) for w in variations : if w in wv : vectors [ i , : ] = wv [ w ] if not np . sum ( np . abs ( vectors [ i ] ) ) : logger . warning ( 'Unable to find {}, {}, or {} in word2vec.' . format ( * variations ) ) return vectors
Create a word2vec embedding matrix for all the words in the vocab
37,482
def get_anki_vocab ( lang = [ 'eng' ] , limit = None , filename = 'anki_en_vocabulary.csv' ) : texts = get_anki_phrases ( lang = lang , limit = limit ) docs = nlp ( texts , lang = lang ) vocab = get_vocab ( docs ) vocab [ 'vector' ] = get_word_vectors ( vocab ) if filename : vocab . to_csv ( os . path . join ( BIGDATA_PATH , filename ) ) return vocab
Get all the vocab words + tags + wordvectors for the tokens in the Anki translation corpus
37,483
def lsa_twitter ( cased_tokens ) : if cased_tokens is None : cased_tokens = ( 'PyConOpenSpaces PyCon PyCon2017 PyCon2018 PyCon2016 PyCon2015 OpenSpace PyconTutorial ' + 'NLP NaturalLanguageProcessing NLPInAction NaturalLanguageProcessingInAction NLPIA Twote Twip' ) . split ( ) cased_tokens += [ s + 's' for s in cased_tokens ] cased_tokens += 'TotalGood TotalGoods HobsonLane Hob Hobs TotalGood.com ' 'www.TotalGood.com http://www.TotalGood.com https://www.TotalGood.com' . split ( ) allcase_tokens = cased_tokens + [ s . lower ( ) for s in cased_tokens ] allcase_tokens += [ s . title ( ) for s in cased_tokens ] allcase_tokens += [ s . upper ( ) for s in cased_tokens ] KEEP_TOKENS = allcase_tokens + [ '#' + s for s in allcase_tokens ] vocab_path = os . path . join ( BIGDATA_PATH , 'vocab939370.pkl' ) if os . path . isfile ( vocab_path ) : print ( 'Loading vocab: {} ...' . format ( vocab_path ) ) vocab = Dictionary . load ( vocab_path ) print ( ' len(vocab) loaded: {}' . format ( len ( vocab . dfs ) ) ) else : tweets_path = os . path . join ( BIGDATA_PATH , 'tweets.csv.gz' ) print ( 'Loading tweets: {} ...' . format ( tweets_path ) ) tweets = read_csv ( tweets_path ) tweets = pd . np . array ( tweets . text . str . split ( ) ) with gzip . open ( os . path . join ( BIGDATA_PATH , 'tweets.txt.gz' ) , 'w' ) as f : for tokens in tweets : f . write ( ( ' ' . join ( tokens ) + '\n' ) . encode ( 'utf-8' ) ) print ( 'Computing vocab from {} tweets...' . format ( len ( tweets ) ) ) vocab = Dictionary ( tweets , no_below = NO_BELOW , no_above = NO_ABOVE , keep_tokens = set ( KEEP_TOKENS ) ) vocab . filter_extremes ( no_below = NO_BELOW , no_above = NO_ABOVE , keep_n = KEEP_N , keep_tokens = set ( KEEP_TOKENS ) ) print ( ' len(vocab) after filtering: {}' . format ( len ( vocab . dfs ) ) ) tfidf = TfidfModel ( id2word = vocab , dictionary = vocab ) tfidf . save ( os . path . join ( BIGDATA_PATH , 'tfidf{}.pkl' . format ( len ( vocab . dfs ) ) ) ) tweets = [ vocab . 
doc2bow ( tw ) for tw in tweets ] json . dump ( tweets , gzip . open ( os . path . join ( BIGDATA_PATH , 'tweet_bows.json.gz' ) , 'w' ) ) gc . collect ( ) lsa = LsiModel ( tfidf [ tweets ] , num_topics = 200 , id2word = vocab , extra_samples = 100 , power_iters = 2 ) return lsa
Latent Sentiment Analyis on random sampling of twitter search results for words listed in cased_tokens
37,484
def wc ( f , verbose = False , nrows = None ) : r tqdm_prog = tqdm if verbose else no_tqdm with ensure_open ( f , mode = 'r' ) as fin : for i , line in tqdm_prog ( enumerate ( fin ) ) : if nrows is not None and i >= nrows - 1 : break return i + 1
r Count lines in a text file
37,485
def normalize_filepath ( filepath ) : r filename = os . path . basename ( filepath ) dirpath = filepath [ : - len ( filename ) ] cre_controlspace = re . compile ( r'[\t\r\n\f]+' ) new_filename = cre_controlspace . sub ( '' , filename ) if not new_filename == filename : logger . warning ( 'Stripping whitespace from filename: {} => {}' . format ( repr ( filename ) , repr ( new_filename ) ) ) filename = new_filename filename = filename . lower ( ) filename = normalize_ext ( filename ) if dirpath : dirpath = dirpath [ : - 1 ] return os . path . join ( dirpath , filename ) return filename
r Lowercase the filename and ext expanding extensions like . tgz to . tar . gz .
37,486
def find_filepath ( filename , basepaths = ( os . path . curdir , DATA_PATH , BIGDATA_PATH , BASE_DIR , '~' , '~/Downloads' , os . path . join ( '/' , 'tmp' ) , '..' ) ) : if os . path . isfile ( filename ) : return filename for basedir in basepaths : fullpath = expand_filepath ( os . path . join ( basedir , filename ) ) if os . path . isfile ( fullpath ) : return fullpath return False
Given a filename or path see if it exists in any of the common places datafiles might be
37,487
def close ( self ) : if not self . _closed : self . _closed = True if self . _pool is not None : self . _pool . close ( ) self . _pool = None
Shut down closing any open connections in the pool .
37,488
def hydrate_point ( srid , * coordinates ) : try : point_class , dim = __srid_table [ srid ] except KeyError : point = Point ( coordinates ) point . srid = srid return point else : if len ( coordinates ) != dim : raise ValueError ( "SRID %d requires %d coordinates (%d provided)" % ( srid , dim , len ( coordinates ) ) ) return point_class ( coordinates )
Create a new instance of a Point subclass from a raw set of fields . The subclass chosen is determined by the given SRID ; a ValueError will be raised if no such subclass can be found .
37,489
def dehydrate_point ( value ) : dim = len ( value ) if dim == 2 : return Structure ( b"X" , value . srid , * value ) elif dim == 3 : return Structure ( b"Y" , value . srid , * value ) else : raise ValueError ( "Cannot dehydrate Point with %d dimensions" % dim )
Dehydrator for Point data .
37,490
def dehydrate ( self , values ) : def dehydrate_ ( obj ) : try : f = self . dehydration_functions [ type ( obj ) ] except KeyError : pass else : return f ( obj ) if obj is None : return None elif isinstance ( obj , bool ) : return obj elif isinstance ( obj , int ) : if INT64_MIN <= obj <= INT64_MAX : return obj raise ValueError ( "Integer out of bounds (64-bit signed integer values only)" ) elif isinstance ( obj , float ) : return obj elif isinstance ( obj , str ) : return obj elif isinstance ( obj , ( bytes , bytearray ) ) : if self . supports_bytes : return obj else : raise TypeError ( "This PackSteam channel does not support BYTES (consider upgrading to Neo4j 3.2+)" ) elif isinstance ( obj , ( list , map_type ) ) : return list ( map ( dehydrate_ , obj ) ) elif isinstance ( obj , dict ) : if any ( not isinstance ( key , str ) for key in obj . keys ( ) ) : raise TypeError ( "Non-string dictionary keys are not supported" ) return { key : dehydrate_ ( value ) for key , value in obj . items ( ) } else : raise TypeError ( obj ) return tuple ( map ( dehydrate_ , values ) )
Convert native values into PackStream values .
37,491
def get ( self , key , default = None ) : try : index = self . __keys . index ( str ( key ) ) except ValueError : return default if 0 <= index < len ( self ) : return super ( Record , self ) . __getitem__ ( index ) else : return default
Obtain a value from the record by key returning a default value if the key does not exist .
37,492
def index ( self , key ) : if isinstance ( key , int ) : if 0 <= key < len ( self . __keys ) : return key raise IndexError ( key ) elif isinstance ( key , str ) : try : return self . __keys . index ( key ) except ValueError : raise KeyError ( key ) else : raise TypeError ( key )
Return the index of the given item .
37,493
def value ( self , key = 0 , default = None ) : try : index = self . index ( key ) except ( IndexError , KeyError ) : return default else : return self [ index ]
Obtain a single value from the record by index or key . If no index or key is specified the first value is returned . If the specified item does not exist the default value is returned .
37,494
def values ( self , * keys ) : if keys : d = [ ] for key in keys : try : i = self . index ( key ) except KeyError : d . append ( None ) else : d . append ( self [ i ] ) return d return list ( self )
Return the values of the record optionally filtering to include only certain values by index or key .
37,495
def items ( self , * keys ) : if keys : d = [ ] for key in keys : try : i = self . index ( key ) except KeyError : d . append ( ( key , None ) ) else : d . append ( ( self . __keys [ i ] , self [ i ] ) ) return d return list ( ( self . __keys [ i ] , super ( Record , self ) . __getitem__ ( i ) ) for i in range ( len ( self ) ) )
Return the fields of the record as a list of key and value tuples
37,496
def _make_plan(plan_dict):
    """Build a Plan (or a ProfiledPlan, when profiling counters are
    present) from a dictionary of plan metadata.
    """
    op_type = plan_dict["operatorType"]
    ids = plan_dict.get("identifiers", [])
    args = plan_dict.get("args", [])
    subplans = [_make_plan(child) for child in plan_dict.get("children", [])]
    # Profiling counters distinguish a ProfiledPlan from a plain Plan.
    if "dbHits" not in plan_dict and "rows" not in plan_dict:
        return Plan(op_type, ids, args, subplans)
    return ProfiledPlan(op_type, ids, args, subplans,
                        plan_dict.get("dbHits", 0), plan_dict.get("rows", 0))
Construct a Plan or ProfiledPlan from a dictionary of metadata values .
37,497
def unit_of_work(metadata=None, timeout=None):
    """Decorator for transaction functions that attaches extra transaction
    settings.

    :param metadata: dict of metadata to attach to the transaction
    :param timeout: transaction timeout
    :return: a decorator that tags the wrapped function with ``metadata``
        and ``timeout`` attributes for the transaction machinery to read
    """
    from functools import wraps

    def wrapper(f):
        # wraps() preserves the decorated function's __name__/__doc__,
        # which the plain inner function previously discarded.
        @wraps(f)
        def wrapped(*args, **kwargs):
            return f(*args, **kwargs)
        wrapped.metadata = metadata
        wrapped.timeout = timeout
        return wrapped
    return wrapper
This function is a decorator for transaction functions that allows extra control over how the transaction is carried out .
37,498
def close(self):
    """Close the session, rolling back any outstanding transaction and
    releasing the borrowed connection.
    """
    from neobolt.exceptions import ConnectionExpired, CypherError, ServiceUnavailable
    rollback_errors = (
        CypherError,
        TransactionError,
        SessionError,
        ConnectionExpired,
        ServiceUnavailable,
    )
    try:
        if self.has_transaction():
            try:
                # Best-effort rollback: the session closes regardless.
                self.rollback_transaction()
            except rollback_errors:
                pass
    finally:
        self._closed = True
        self._disconnect(sync=True)
Close the session . This will release any borrowed resources such as connections and will roll back any outstanding transactions .
37,499
def run(self, statement, parameters=None, **kwparameters):
    """Run a Cypher statement within an auto-commit transaction.

    :param statement: Cypher statement (a string or a Statement instance)
    :param parameters: optional dict of statement parameters
    :param kwparameters: additional keyword parameters, merged over *parameters*
    :return: a BoltStatementResult attached to this session
    :raises ValueError: if the statement is empty, or if metadata/timeout
        are attached to a statement run inside an explicit transaction
    :raises TypeError: if *statement* is neither a string nor a Statement
    :raises SessionExpired: if the connection expired while sending or
        fetching outside an explicit transaction
    """
    from neobolt.exceptions import ConnectionExpired
    self._assert_open()
    if not statement:
        raise ValueError("Cannot run an empty statement")
    if not isinstance(statement, (str, Statement)):
        raise TypeError("Statement must be a string or a Statement instance")
    # Lazily establish a connection on first use.
    if not self._connection:
        self._connect()
    cx = self._connection
    protocol_version = cx.protocol_version
    server = cx.server
    has_transaction = self.has_transaction()
    statement_text = str(statement)
    # Statement objects may carry per-statement metadata and timeout.
    statement_metadata = getattr(statement, "metadata", None)
    statement_timeout = getattr(statement, "timeout", None)
    parameters = fix_parameters(dict(parameters or {}, **kwparameters), protocol_version, supports_bytes=server.supports("bytes"))

    def fail(_):
        # On failure, abandon any open transaction state.
        self._close_transaction()
    hydrant = PackStreamHydrator(protocol_version)
    result_metadata = {"statement": statement_text, "parameters": parameters, "server": server, "protocol_version": protocol_version, }
    run_metadata = {"metadata": statement_metadata, "timeout": statement_timeout, "on_success": result_metadata.update, "on_failure": fail, }

    def done(summary_metadata):
        # Capture the bookmark from the summary for causal chaining
        # of subsequent sessions/transactions.
        result_metadata.update(summary_metadata)
        bookmark = result_metadata.get("bookmark")
        if bookmark:
            self._bookmarks_in = tuple([bookmark])
            self._bookmark_out = bookmark
    self._last_result = result = BoltStatementResult(self, hydrant, result_metadata)
    if has_transaction:
        # Metadata/timeout belong to the transaction, not to individual
        # statements run inside it.
        if statement_metadata:
            raise ValueError("Metadata can only be attached at transaction level")
        if statement_timeout:
            raise ValueError("Timeouts only apply at transaction level")
    else:
        run_metadata["bookmarks"] = self._bookmarks_in
    cx.run(statement_text, parameters, **run_metadata)
    cx.pull_all(on_records=lambda records: result._records.extend(hydrant.hydrate_records(result.keys(), records)), on_success=done, on_failure=fail, on_summary=lambda: result.detach(sync=False), )
    if not has_transaction:
        # Auto-commit mode: flush the request and consume the response now.
        try:
            self._connection.send()
            self._connection.fetch()
        except ConnectionExpired as error:
            raise SessionExpired(*error.args)
    return result
Run a Cypher statement within an auto - commit transaction .