idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
225,400
def onehot_encode(dataset, char_indices, maxlen):
    """One-hot encode each character of each sentence in `dataset`.

    Returns an array of shape (num_sentences, maxlen, vocab_size) with a 1 at
    [i, t, char_indices[char]] for the t-th character of the i-th sentence.
    """
    vocab_size = len(char_indices.keys())
    encoded = np.zeros((len(dataset), maxlen, vocab_size))
    for row, sentence in enumerate(dataset):
        for col, token in enumerate(sentence):
            encoded[row, col, char_indices[token]] = 1
    return encoded
One hot encode the tokens
87
5
225,401
def _fit_full(self, X, n_components=6):
    """Fit the model by computing a full SVD on X.

    FIX: the original signature `def _fit_full(self=self, X=X, ...)` referenced
    undefined names as defaults (a NameError at definition time); debug
    print() calls are removed as well.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features); centered IN PLACE.
    n_components : int, float in (0, 1), or 'mle'

    Returns
    -------
    U, S, V : raw SVD factors (V holds right singular vectors as rows).
    """
    n_samples, n_features = X.shape
    # Center data (note: mutates the caller's array)
    self.mean_ = np.mean(X, axis=0)
    X -= self.mean_
    U, S, V = linalg.svd(X, full_matrices=False)
    # flip eigenvectors' sign to enforce deterministic output
    U, V = svd_flip(U, V)
    components_ = V
    # Get variance explained by singular values
    explained_variance_ = (S ** 2) / (n_samples - 1)
    total_var = explained_variance_.sum()
    explained_variance_ratio_ = explained_variance_ / total_var
    singular_values_ = S.copy()  # Store the singular values.
    # Postprocess the number of components required
    if n_components == 'mle':
        n_components = _infer_dimension_(explained_variance_, n_samples, n_features)
    elif 0 < n_components < 1.0:
        # number of components for which the cumulated explained
        # variance percentage is superior to the desired threshold
        ratio_cumsum = stable_cumsum(explained_variance_ratio_)
        n_components = np.searchsorted(ratio_cumsum, n_components) + 1
    # Compute noise covariance using Probabilistic PCA model
    # The sigma2 maximum likelihood (cf. eq. 12.46)
    if n_components < min(n_features, n_samples):
        self.noise_variance_ = explained_variance_[n_components:].mean()
    else:
        self.noise_variance_ = 0.
    self.n_samples_, self.n_features_ = n_samples, n_features
    self.components_ = components_[:n_components]
    self.n_components_ = n_components
    self.explained_variance_ = explained_variance_[:n_components]
    self.explained_variance_ratio_ = explained_variance_ratio_[:n_components]
    self.singular_values_ = singular_values_[:n_components]
    return U, S, V
Fit the model by computing full SVD on X
557
10
225,402
def extract_aiml(path='aiml-en-us-foundation-alice.v1-9'):
    """Extract an aiml .zip file if it hasn't been already and return a list of aiml file paths."""
    path = find_data_path(path) or path
    if os.path.isdir(path):
        # Already extracted: just list the directory contents.
        return [os.path.join(path, name) for name in os.listdir(path)]
    # Otherwise treat `path` as a zip archive, skipping VCS bookkeeping files.
    archive = zipfile.ZipFile(path)
    extracted = []
    for member in archive.namelist():
        if '.hg/' in member:
            continue
        extracted.append(archive.extract(member, path=BIGDATA_PATH))
    return extracted
Extract an aiml .zip file if it hasn't been extracted already and return a list of aiml file paths
138
23
225,403
def create_brain(path='aiml-en-us-foundation-alice.v1-9.zip'):
    """Create an aiml_bot.Bot brain from an AIML zip file or directory of AIML files."""
    path = find_data_path(path) or path
    bot = Bot()
    num_templates = bot._brain.template_count
    paths = extract_aiml(path=path)
    aiml_paths = [p for p in paths if p.lower().endswith('.aiml')]
    for aiml_path in aiml_paths:
        try:
            bot.learn(aiml_path)
        except AimlParserError:
            logger.error(format_exc())
            logger.warning('AIML Parse Error: {}'.format(aiml_path))
    num_templates = bot._brain.template_count - num_templates
    logger.info('Loaded {} trigger-response pairs.\n'.format(num_templates))
    print('Loaded {} trigger-response pairs from {} AIML files.'.format(bot._brain.template_count, len(paths)))
    return bot
Create an aiml_bot . Bot brain from an AIML zip file or directory of AIML files
218
23
225,404
def minify_urls(filepath, ext='asc', url_regex=None, output_ext='.urls_minified', access_token=None):
    """Use bitly to shrink all URLs in text files within a folder structure.

    Walks `find_files(filepath, ext=ext)`, rewrites each file's URLs through
    the bitly shortener, and writes the result to `<path><output_ext>`.

    Parameters
    ----------
    filepath : root directory (or file) searched by `find_files`
    ext : file-extension filter passed to `find_files`
    url_regex : str or compiled pattern matching the URLs to shorten
    output_ext : suffix appended to each output filename ('' overwrites in place)
    access_token : bitly API token (falls back to `secrets.bitly.access_token`,
        a project-level settings object, not the stdlib `secrets` module)

    Returns
    -------
    str : the altered text of the last file processed
    """
    access_token = access_token or secrets.bitly.access_token
    output_ext = output_ext or ''
    url_regex = regex.compile(url_regex) if isinstance(url_regex, str) else url_regex
    filemetas = []
    for filemeta in find_files(filepath, ext=ext):
        filemetas += [filemeta]
        altered_text = ''
        with open(filemeta['path'], 'rt') as fin:
            text = fin.read()
        end = 0
        for match in url_regex.finditer(text):
            url = match.group()
            start = match.start()
            # FIX: append only the text since the previous match (the original
            # used `text[:start]`, which re-appended the entire prefix for
            # every URL after the first, corrupting the output).
            altered_text += text[end:start]
            resp = requests.get(
                'https://api-ssl.bitly.com/v3/shorten?access_token={}&longUrl={}'.format(access_token, url),
                allow_redirects=True, timeout=5)
            js = resp.json()
            short_url = js['shortUrl']
            altered_text += short_url
            end = start + len(url)
        altered_text += text[end:]
        with open(filemeta['path'] + (output_ext or ''), 'wt') as fout:
            fout.write(altered_text)
    return altered_text
Use bitly or similar minifier to shrink all URLs in text files within a folder structure .
336
19
225,405
def delimit_slug(slug, sep=' '):
    """Return a str of separated tokens found within a slugLike_This => slug Like This."""
    # CRE_SLUG_DELIMITTER is a module-level compiled pattern marking token boundaries.
    return re.sub(CRE_SLUG_DELIMITTER, sep, slug)
Return a str of separated tokens found within a slugLike_This = > slug Like This
48
18
225,406
def clean_asciidoc(text):
    r"""Transform asciidoc text into ASCII text that NL parsers can handle.

    Opening and closing asciidoc emphasis markers ([, _, *, ]) around
    alphanumerics are replaced with plain double quotes.
    """
    opening = re.compile(r'(\b|^)[\[_*]{1,2}([a-zA-Z0-9])')
    closing = re.compile(r'([a-zA-Z0-9])[\]_*]{1,2}')
    return closing.sub(r'\1"', opening.sub(r'"\2', text))
r Transform asciidoc text into ASCII text that NL parsers can handle
96
16
225,407
def split_sentences_regex(text):
    r"""Use dead-simple regex to split text into sentences. Very poor accuracy.

    Splits after a letter/digit followed by ., ?, or ! plus whitespace.
    Uses the stdlib `re` module — the third-party `regex` package the original
    called offers no extra features for this pattern.
    NOTE(review): the `$` inside `[\s$]` matches a literal dollar sign, not
    end-of-string; the trailing-fragment handling below compensates for a
    final sentence with no trailing whitespace.
    """
    parts = re.split(r'([a-zA-Z0-9][.?!])[\s$]', text)
    sentences = [''.join(pair) for pair in zip(parts[0::2], parts[1::2])]
    return sentences + [parts[-1]] if len(parts) % 2 else sentences
Use dead - simple regex to split text into sentences . Very poor accuracy .
92
15
225,408
# Split `text` into sentences using the module-level spaCy pipeline `nlp`;
# falls back to split_sentences_nltk() when the loaded model exposes no
# `sents` iterator.  NOTE(review): `language_model` is accepted but never
# used here — presumably the model is loaded elsewhere; confirm.
# NOTE(review): `doc[i].string` looks like an old spaCy v1/v2 API — verify
# against the pinned spaCy version.
def split_sentences_spacy ( text , language_model = 'en' ) : doc = nlp ( text ) sentences = [ ] if not hasattr ( doc , 'sents' ) : logger . warning ( "Using NLTK sentence tokenizer because SpaCy language model hasn't been loaded" ) return split_sentences_nltk ( text ) for w , span in enumerate ( doc . sents ) : sent = '' . join ( doc [ i ] . string for i in range ( span . start , span . end ) ) . strip ( ) if len ( sent ) : sentences . append ( sent ) return sentences
r You must download a spacy language model with python - m download en
137
15
225,409
# Return a list of all sentences (and empty lines) under `path`.
# Two modes: when `path` is a directory, every file found by find_files() is
# read and split into blank-line-delimited batches which are fed to
# `splitter`; otherwise `path` is handed to iter_lines() and batched the same
# way.  The trailing `if len(batch)` blocks flush the final batch of each
# stream.  NOTE(review): batching/flush order is intricate — do not reorder.
def segment_sentences ( path = os . path . join ( DATA_PATH , 'book' ) , splitter = split_sentences_nltk , * * find_files_kwargs ) : sentences = [ ] if os . path . isdir ( path ) : for filemeta in find_files ( path , * * find_files_kwargs ) : with open ( filemeta [ 'path' ] ) as fin : i , batch = 0 , [ ] try : for i , line in enumerate ( fin ) : if not line . strip ( ) : sentences . extend ( splitter ( '\n' . join ( batch ) ) ) batch = [ line ] # may contain all whitespace else : batch . append ( line ) except ( UnicodeDecodeError , IOError ) : logger . error ( 'UnicodeDecodeError or IOError on line {} in file {} from stat: {}' . format ( i + 1 , fin . name , filemeta ) ) raise if len ( batch ) : # TODO: tag sentences with line + filename where they started sentences . extend ( splitter ( '\n' . join ( batch ) ) ) else : batch = [ ] for i , line in enumerate ( iter_lines ( path ) ) : # TODO: filter out code and meta lines using asciidoc or markdown parser # split into batches based on empty lines if not line . strip ( ) : sentences . extend ( splitter ( '\n' . join ( batch ) ) ) # first line may contain all whitespace batch = [ line ] else : batch . append ( line ) if len ( batch ) : # TODO: tag sentences with line + filename where they started sentences . extend ( splitter ( '\n' . join ( batch ) ) ) return sentences
Return a list of all sentences and empty lines .
384
10
225,410
# Fix the invalid hunspellToJSON.py JSON format by inserting double quotes
# around bare words in affix lists, then re-parse the repaired JSON and dump
# every word and affix (one per line) to `goodjson_path + '.txt'`.
# Returns the flat list of words + affixes.  The three regex.sub passes
# quote list openings, list closings, and comma-separated interiors in turn.
def fix_hunspell_json ( badjson_path = 'en_us.json' , goodjson_path = 'en_us_fixed.json' ) : with open ( badjson_path , 'r' ) as fin : with open ( goodjson_path , 'w' ) as fout : for i , line in enumerate ( fin ) : line2 = regex . sub ( r'\[(\w)' , r'["\1' , line ) line2 = regex . sub ( r'(\w)\]' , r'\1"]' , line2 ) line2 = regex . sub ( r'(\w),(\w)' , r'\1","\2' , line2 ) fout . write ( line2 ) with open ( goodjson_path , 'r' ) as fin : words = [ ] with open ( goodjson_path + '.txt' , 'w' ) as fout : hunspell = json . load ( fin ) for word , affixes in hunspell [ 'words' ] . items ( ) : words += [ word ] fout . write ( word + '\n' ) for affix in affixes : words += [ affix ] fout . write ( affix + '\n' ) return words
Fix the invalid hunspellToJSON . py json format by inserting double - quotes in list of affix strings
276
22
225,411
def format_ubuntu_dialog(df):
    """Print statements paired with replies formatted for easy review."""
    chunks = []
    for _, record in df.iterrows():
        statement = list(split_turns(record.Context))[-1]  # <1>
        reply = list(split_turns(record.Utterance))[-1]  # <2>
        chunks.append('Statement: {}\n'.format(statement))
        chunks.append('Reply: {}\n\n'.format(reply))
    return ''.join(chunks)
Print statements paired with replies formatted for easy review
104
9
225,412
def splitext(filepath):
    """Like os.path.splitext except splits compound extensions as one long one."""
    match = CRE_FILENAME_EXT.search(filepath)
    exts = match.group() if match else ''
    # `-len('') or None` -> None, so a no-extension path is returned whole.
    return filepath[:(-len(exts) or None)], exts
Like os . path . splitext except splits compound extensions as one long one
58
16
225,413
def offline_plotly_scatter3d(df, x=0, y=1, z=-1):
    """Plot an offline 3-D scatter plot colored according to the categories in the `name` column.

    Returns the URL/path of the HTML file rendered by `plotly.offline.plot`.
    """
    data = []
    colors = ['rgb(228,26,28)', 'rgb(55,126,184)', 'rgb(77,175,74)']
    x = get_array(df, x, default=0)
    y = get_array(df, y, default=1)
    z = get_array(df, z, default=-1)
    # FIX: the original indexed df['Name'] (wrong case) while masking on
    # df['name'], and overwrote x/y/z with their masked versions so every
    # iteration after the first masked already-masked (shorter) arrays.
    for i, name in enumerate(df['name'].unique()):
        mask = pd.np.array(df['name'] == name)
        trace = dict(
            name=name,
            x=x[mask], y=y[mask], z=z[mask],
            type="scatter3d",
            mode='markers',
            # FIX: cycle the palette instead of raising IndexError beyond 3 categories.
            marker=dict(size=3, color=colors[i % len(colors)], line=dict(width=0)))
        data.append(trace)
    layout = dict(
        width=800, height=550, autosize=False, title='Iris dataset',
        scene=dict(
            xaxis=dict(gridcolor='rgb(255, 255, 255)', zerolinecolor='rgb(255, 255, 255)',
                       showbackground=True, backgroundcolor='rgb(230, 230,230)'),
            yaxis=dict(gridcolor='rgb(255, 255, 255)', zerolinecolor='rgb(255, 255, 255)',
                       showbackground=True, backgroundcolor='rgb(230, 230,230)'),
            zaxis=dict(gridcolor='rgb(255, 255, 255)', zerolinecolor='rgb(255, 255, 255)',
                       showbackground=True, backgroundcolor='rgb(230, 230,230)'),
            aspectratio=dict(x=1, y=1, z=0.7),
            aspectmode='manual'),
    )
    fig = dict(data=data, layout=layout)
    # IPython notebook alternative: plotly.iplot(fig, filename='pandas-3d-iris', validate=False)
    url = plotly.offline.plot(fig, filename='pandas-3d-iris', validate=False)
    return url
Plot an offline scatter plot colored according to the categories in the name column .
593
15
225,414
def offline_plotly_data(data, filename=None, config=None, validate=True,
                        default_width='100%', default_height=525, global_requirejs=False):
    r"""Write a plotly scatter plot to HTML file that doesn't require server.

    The bundled plotly.js source is inlined into the page; the composed HTML
    is returned and, when `filename` is a str, also written to disk.
    """
    config_default = dict(DEFAULT_PLOTLY_CONFIG)
    if config is not None:
        config_default.update(config)
    with open(os.path.join(DATA_PATH, 'plotly.js.min'), 'rt') as f:
        js = f.read()
    html, divid, width, height = _plot_html(
        data,
        config=config_default,
        validate=validate,
        default_width=default_width,
        default_height=default_height,
        global_requirejs=global_requirejs,
    )
    html = PLOTLY_HTML.format(plotlyjs=js, plotlyhtml=html)
    if filename and isinstance(filename, str):
        with open(filename, 'wt') as f:
            f.write(html)
    return html
r Write a plotly scatter plot to HTML file that doesn t require server
216
15
225,415
def normalize_etpinard_df(df='https://plot.ly/~etpinard/191.csv',
                          columns='x y size text'.split(),
                          category_col='category',
                          possible_categories=['Africa', 'Americas', 'Asia', 'Europe', 'Oceania']):
    """Reformat a dataframe in etpinard's format for use in plot functions and sklearn models.

    NOTE(review): mutable default arguments retained for interface compatibility.
    """
    possible_categories = ['Africa', 'Americas', 'Asia', 'Europe', 'Oceania'] if possible_categories is None else possible_categories
    # FIX: load the CSV *before* touching `.columns` — the original dereferenced
    # df.columns while df was still the default URL string, raising AttributeError.
    df = pd.read_csv(df) if isinstance(df, str) else df
    df.columns = clean_columns(df.columns)
    columns = clean_columns(list(columns))
    df2 = pd.DataFrame(columns=columns)
    df2[category_col] = np.concatenate([np.array([categ] * len(df)) for categ in possible_categories])
    # Pair each output column with its per-category source columns, e.g. 'africa, x'.
    columns = zip(columns, [[clean_columns(categ + ', ' + column) for categ in possible_categories] for column in columns])
    for col, category_cols in columns:
        df2[col] = np.concatenate([df[label].values for label in category_cols])
    return df2
Reformat a dataframe in etpinard s format for use in plot functions and sklearn models
295
20
225,416
# Interactive scatterplot of a DataFrame with circle size and color linked to
# two columns (size_col, category_col); delegates rendering to
# offline_plotly_data().  Category masks select the rows of each trace;
# `marker['size']` may be a column name or a literal size.
# NOTE(review): `config`, `layout`, and `marker` are mutable default
# arguments — they are not mutated here (update() targets copies), but the
# pattern is fragile; confirm before refactoring.
def offline_plotly_scatter_bubble ( df , x = 'x' , y = 'y' , size_col = 'size' , text_col = 'text' , category_col = 'category' , possible_categories = None , filename = None , config = { 'displaylogo' : False } , xscale = None , yscale = 'log' , layout = { 'hovermode' : 'closest' , 'showlegend' : False , 'autosize' : True } , marker = { 'sizemode' : 'area' } , min_size = 10 , ) : config_default = dict ( DEFAULT_PLOTLY_CONFIG ) marker_default = { 'size' : size_col or min_size , 'sizemode' : 'area' , 'sizeref' : int ( df [ size_col ] . min ( ) * .8 ) if size_col else min_size } marker_default . update ( marker ) size_col = marker_default . pop ( 'size' ) layout_default = { 'xaxis' : XAxis ( title = x , type = xscale ) , 'yaxis' : YAxis ( title = y , type = yscale ) , } layout_default . update ( * * layout ) if config is not None : config_default . update ( config ) df . columns = clean_columns ( df . columns ) if possible_categories is None and category_col is not None : if category_col in df . columns : category_labels = df [ category_col ] else : category_labels = np . array ( category_col ) possible_categories = list ( set ( category_labels ) ) possible_categories = [ None ] if possible_categories is None else possible_categories if category_col and category_col in df : masks = [ np . array ( df [ category_col ] == label ) for label in possible_categories ] else : masks = [ np . array ( [ True ] * len ( df ) ) ] * len ( possible_categories ) data = { 'data' : [ Scatter ( x = df [ x ] [ mask ] . values , y = df [ y ] [ mask ] . values , text = df [ text_col ] [ mask ] . values , marker = Marker ( size = df [ size_col ] [ mask ] if size_col in df . columns else size_col , * * marker_default ) , mode = 'markers' , name = str ( category_name ) ) for ( category_name , mask ) in zip ( possible_categories , masks ) ] , 'layout' : Layout ( * * layout_default ) } return offline_plotly_data ( data , filename = filename , config = config_default )
r Interactive scatterplot of a DataFrame with the size and color of circles linked to two columns
619
20
225,417
def format_hex ( i , num_bytes = 4 , prefix = '0x' ) : prefix = str ( prefix or '' ) i = int ( i or 0 ) return prefix + '{0:0{1}x}' . format ( i , num_bytes )
Format a hexadecimal string from a decimal integer value
60
10
225,418
def is_up_url(url, allow_redirects=False, timeout=5):
    r"""Check URL to see if it is a valid web page; return the redirected location if it is.

    Returns
    -------
    str : redirect location for 3xx responses (or any response with a
        'location' header), or the normalized URL for other sub-400 statuses
    None : connection failure or other request error
    False : invalid input or HTTP error status
    """
    if not isinstance(url, basestring) or '.' not in url:
        return False
    normalized_url = prepend_http(url)
    session = requests.Session()
    session.mount(url, HTTPAdapter(max_retries=2))
    try:
        resp = session.get(normalized_url, allow_redirects=allow_redirects, timeout=timeout)
    except ConnectionError:
        return None
    # FIX: the original bare `except:` also swallowed KeyboardInterrupt and
    # SystemExit; narrowed to Exception (still deliberately best-effort).
    except Exception:
        return None
    if resp.status_code in (301, 302, 307) or resp.headers.get('location', None):
        return resp.headers.get('location', None)  # return redirected URL
    elif 100 <= resp.status_code < 400:
        return normalized_url  # return the original URL that was requested/visited
    else:
        return False
r Check URL to see if it is a valid web page return the redirected location if it is
191
19
225,419
def get_markdown_levels(lines, levels=set((0, 1, 2, 3, 4, 5, 6))):
    r"""Return a list of (level, text) 2-tuples for the requested heading levels.

    Level 0 means "any line" (the raw line is kept); levels 1-6 are `#`-style
    markdown headings with the hashes stripped.  `lines` may be a str or a
    list of lines; `levels` may be a scalar or an iterable of ints.
    """
    if isinstance(levels, (int, float, basestring, str, bytes)):
        levels = [float(levels)]
    levels = set([int(i) for i in levels])
    if isinstance(lines, basestring):
        lines = lines.splitlines()
    level_lines = []
    for line in lines:
        level_line = None
        if 0 in levels:
            level_line = (0, line)
        lstripped = line.lstrip()
        # FIX: iterate 6..1 — the original `range(6, 1, -1)` stopped at 2, so
        # single-`#` level-1 headings were never detected even though 1 is in
        # the default `levels` set.
        for i in range(6, 0, -1):
            if lstripped.startswith('#' * i):
                level_line = (i, lstripped[i:].lstrip())
                break
        if level_line and level_line[0] in levels:
            level_lines.append(level_line)
    return level_lines
r Return a list of 2 - tuples with a level integer for the heading levels
213
17
225,420
# Return an iterator over the lines of a file, string, URL response, or
# directory of files.  Dispatch order: falsy -> []; str/bytes -> literal text
# (contains newline), a file under DATA_PATH, a plain file path, a directory
# (chained open() of find_files results), a URL (requests stream), or else
# treated as literal text; list/tuple -> recursively joined.
# NOTE(review): `for i in range(3): return requests.get(...)` returns on the
# first iteration — the loop is dead code (a retry that never retries).
# NOTE(review): the requests Response is returned as-is, not line-iterated —
# confirm callers handle that.
def iter_lines ( url_or_text , ext = None , mode = 'rt' ) : if url_or_text is None or not url_or_text : return [ ] # url_or_text = 'https://www.fileformat.info/info/charset/UTF-8/list.htm' elif isinstance ( url_or_text , ( str , bytes , basestring ) ) : if '\n' in url_or_text or '\r' in url_or_text : return StringIO ( url_or_text ) elif os . path . isfile ( os . path . join ( DATA_PATH , url_or_text ) ) : return open ( os . path . join ( DATA_PATH , url_or_text ) , mode = mode ) elif os . path . isfile ( url_or_text ) : return open ( os . path . join ( url_or_text ) , mode = mode ) if os . path . isdir ( url_or_text ) : filepaths = [ filemeta [ 'path' ] for filemeta in find_files ( url_or_text , ext = ext ) ] return itertools . chain . from_iterable ( map ( open , filepaths ) ) url = looks_like_url ( url_or_text ) if url : for i in range ( 3 ) : return requests . get ( url , stream = True , allow_redirects = True , timeout = 5 ) else : return StringIO ( url_or_text ) elif isinstance ( url_or_text , ( list , tuple ) ) : # FIXME: make this lazy with chain and map so it doesn't gobble up RAM text = '' for s in url_or_text : text += '\n' . join ( list ( iter_lines ( s , ext = ext , mode = mode ) ) ) + '\n' return iter_lines ( text )
r Return an iterator over the lines of a file or URI response .
429
14
225,421
def clean_csvs(dialogpath=None):
    """Translate non-ASCII characters to spaces or equivalent ASCII characters.

    Cleans every CSV under `dialogpath` (or the single CSV file it names) in
    place via `clean_df`, rewriting each file without a header row.
    Returns the list of filenames processed.
    """
    # FIX: `os.dirname` does not exist — the correct name is os.path.dirname
    # (the original raised AttributeError for any file path).
    dialogdir = os.path.dirname(dialogpath) if os.path.isfile(dialogpath) else dialogpath
    filenames = [dialogpath.split(os.path.sep)[-1]] if os.path.isfile(dialogpath) else os.listdir(dialogpath)
    for filename in filenames:
        filepath = os.path.join(dialogdir, filename)
        df = clean_df(filepath)
        df.to_csv(filepath, header=None)
    return filenames
Translate non - ASCII characters to spaces or equivalent ASCII characters
125
12
225,422
def unicode2ascii(text, expand=True):
    r"""Translate UTF8 characters to ASCII.

    Characters mapped in UTF8_TO_MULTIASCII (or UTF8_TO_ASCII when
    expand=False) are translated; unmapped non-ASCII characters become
    spaces.  The result is stripped of leading/trailing whitespace.
    """
    translate = UTF8_TO_ASCII if not expand else UTF8_TO_MULTIASCII
    # FIX(perf): build with list + join instead of quadratic `str +=`.
    chunks = []
    for c in text:
        if not c or ord(c) < 128:
            chunks.append(c)
        else:
            chunks.append(translate[c] if c in translate else ' ')
    return ''.join(chunks).strip()
r Translate UTF8 characters to ASCII
81
8
225,423
def clean_df(df, header=None, **read_csv_kwargs):
    """Convert UTF8 characters in a CSV file or dataframe into ASCII."""
    frame = read_csv(df, header=header, **read_csv_kwargs)
    frame = frame.fillna(' ')
    for column in frame.columns:
        frame[column] = frame[column].apply(unicode2ascii)
    return frame
Convert UTF8 characters in a CSV file or dataframe into ASCII
80
14
225,424
def get_acronyms(manuscript=os.path.expanduser('~/code/nlpia/lane/manuscript')):
    """Find all the 2- and 3-letter acronyms in the manuscript; return a sorted list of (acronym, expansion) tuples."""
    acronyms = []
    for f, lines in get_lines(manuscript):
        for line in lines:
            for m in CRE_ACRONYM.finditer(line):
                # CRE_ACRONYM captures the pair in one of several alternative
                # group pairs (a2/s2 ... a5/s5); take the first that matched.
                for n in (2, 3, 4, 5):
                    if m.group('a{}'.format(n)):
                        acronyms.append((m.group('a{}'.format(n)), m.group('s{}'.format(n))))
                        break
    return sorted(dict(acronyms).items())
Find all the 2 and 3 - letter acronyms in the manuscript and return as a sorted list of tuples
252
23
225,425
def write_glossary(manuscript=os.path.expanduser('~/code/nlpia/lane/manuscript'), linesep=None):
    """Compose an asciidoc string with acronyms culled from the manuscript."""
    linesep = linesep or os.linesep
    lines = ['[acronyms]', '== Acronyms', '', '[acronyms,template="glossary",id="terms"]']
    for acro, expansion in get_acronyms(manuscript):
        capitalized = expansion[0].upper() + expansion[1:]
        lines.append('*{}*:: {} -- '.format(acro, capitalized))
    return linesep.join(lines)
Compose an asciidoc string with acronyms culled from the manuscript
157
17
225,426
def infer_url_title(url):
    """Guess what the page title is going to be from the path and FQDN in the URL."""
    meta = get_url_filemeta(url)
    if not meta:
        logging.error('Unable to retrieve URL: {}'.format(url))
        return None
    if meta.get('hostname', url) == 'drive.google.com':
        title = get_url_title(url)
    else:
        title = meta.get('filename', meta['hostname']) or meta['hostname']
        title, fileext = splitext(title)
    return delimit_slug(title, ' ')
Guess what the page title is going to be from the path and FQDN in the URL
132
20
225,427
# Apply each `translators` callable line-by-line to the tagged sections of
# every file in `book_dir`, writing results to `dest` (or in place after
# backing the file up with suffix `ext`).  Returns a list of
# (fileid, lineno, filepath, destpath, old_line, new_line) change records.
# NOTE(review): `skip_untitled` is accepted but never used in this body.
# NOTE(review): in-place + backup vs. dest-dir branching is order-sensitive;
# kept byte-identical.
def translate_book ( translators = ( HyperlinkStyleCorrector ( ) . translate , translate_line_footnotes ) , book_dir = BOOK_PATH , dest = None , include_tags = None , ext = '.nlpiabak' , skip_untitled = True ) : if callable ( translators ) or not hasattr ( translators , '__len__' ) : translators = ( translators , ) sections = get_tagged_sections ( book_dir = book_dir , include_tags = include_tags ) file_line_maps = [ ] for fileid , ( filepath , tagged_lines ) in enumerate ( sections ) : logger . info ( 'filepath={}' . format ( filepath ) ) destpath = filepath if not dest : copyfile ( filepath , filepath + '.' + ext . lstrip ( '.' ) ) elif os . path . sep in dest : destpath = os . path . join ( dest , os . path . basename ( filepath ) ) else : destpath = os . path . join ( os . path . dirname ( filepath ) , dest , os . path . basename ( filepath ) ) ensure_dir_exists ( os . path . dirname ( destpath ) ) with open ( destpath , 'w' ) as fout : logger . info ( 'destpath={}' . format ( destpath ) ) for lineno , ( tag , line ) in enumerate ( tagged_lines ) : if ( include_tags is None or tag in include_tags or any ( ( tag . startswith ( t ) for t in include_tags ) ) ) : for translate in translators : new_line = translate ( line ) # TODO: be smarter about writing to files in-place if line != new_line : file_line_maps . append ( ( fileid , lineno , filepath , destpath , line , new_line ) ) line = new_line fout . write ( line ) return file_line_maps
Fix any style corrections listed in translate list of translation functions
440
11
225,428
def filter_lines(input_file, output_file, translate=lambda line, tag=None: line):
    """Translate all the lines of a single file.

    Returns (filepath, [(tag, translated_line), ...]).

    FIX: the default `translate` originally accepted only `line`, but it is
    invoked as translate(line=..., tag=...), so the default always raised
    TypeError; it now accepts (and ignores) `tag`.
    NOTE(review): `output_file` is accepted but never used — kept for
    interface compatibility.
    """
    filepath, lines = get_lines([input_file])[0]
    return filepath, [(tag, translate(line=line, tag=tag)) for (tag, line) in lines]
Translate all the lines of a single file
67
9
225,429
def filter_tagged_lines(tagged_lines, include_tags=None, exclude_tags=None):
    r"""Yield tagged lines whose tags match `include_tags` prefixes and none of `exclude_tags`.

    A bare str for either argument is treated as a single-element tuple;
    None disables that filter.
    """
    if isinstance(include_tags, str):
        include_tags = (include_tags,)
    if isinstance(exclude_tags, str):
        exclude_tags = (exclude_tags,)
    for tagged_line in tagged_lines:
        tag = tagged_line[0]
        included = (include_tags is None or tag in include_tags
                    or any(tag.startswith(t) for t in include_tags))
        if not included:
            logger.debug('skipping tag {} because not in {}'.format(tag, include_tags))
            continue
        excluded = exclude_tags is not None and any(tag.startswith(t) for t in exclude_tags)
        if excluded:
            logger.debug('skipping tag {} because it starts with one of the exclude_tags={}'.format(tag, exclude_tags))
            continue
        yield tagged_line
r Return iterable of tagged lines where the tags all start with one of the include_tags prefixes
234
21
225,430
# Reconstruct the term-document matrix from its SVD factors (u, s, vt) and
# measure RMS reconstruction error as singular-value dimensions are zeroed
# out one at a time (highest index first).  Returns the list of errors, one
# entry per truncation step (index 0 = no truncation).  `verbosity` controls
# the diagnostic print()s; the inline tables are example output for the
# ship/boat/ocean toy corpus.  NOTE(review): kwargs is accepted but unused.
def accuracy_study ( tdm = None , u = None , s = None , vt = None , verbosity = 0 , * * kwargs ) : smat = np . zeros ( ( len ( u ) , len ( vt ) ) ) np . fill_diagonal ( smat , s ) smat = pd . DataFrame ( smat , columns = vt . index , index = u . index ) if verbosity : print ( ) print ( 'Sigma:' ) print ( smat . round ( 2 ) ) print ( ) print ( 'Sigma without zeroing any dim:' ) print ( np . diag ( smat . round ( 2 ) ) ) tdm_prime = u . values . dot ( smat . values ) . dot ( vt . values ) if verbosity : print ( ) print ( 'Reconstructed Term-Document Matrix' ) print ( tdm_prime . round ( 2 ) ) err = [ np . sqrt ( ( ( tdm_prime - tdm ) . values . flatten ( ) ** 2 ) . sum ( ) / np . product ( tdm . shape ) ) ] if verbosity : print ( ) print ( 'Error without reducing dimensions:' ) print ( err [ - 1 ] ) # 2.3481474529927113e-15 smat2 = smat . copy ( ) for numdim in range ( len ( s ) - 1 , 0 , - 1 ) : smat2 . iloc [ numdim , numdim ] = 0 if verbosity : print ( 'Sigma after zeroing out dim {}' . format ( numdim ) ) print ( np . diag ( smat2 . round ( 2 ) ) ) # d0 d1 d2 d3 d4 d5 # ship 2.16 0.00 0.0 0.0 0.0 0.0 # boat 0.00 1.59 0.0 0.0 0.0 0.0 # ocean 0.00 0.00 0.0 0.0 0.0 0.0 # voyage 0.00 0.00 0.0 0.0 0.0 0.0 # trip 0.00 0.00 0.0 0.0 0.0 0.0 tdm_prime2 = u . values . dot ( smat2 . values ) . dot ( vt . values ) err += [ np . sqrt ( ( ( tdm_prime2 - tdm ) . values . flatten ( ) ** 2 ) . sum ( ) / np . product ( tdm . shape ) ) ] if verbosity : print ( 'Error after zeroing out dim {}' . format ( numdim ) ) print ( err [ - 1 ] ) return err
Reconstruct the term - document matrix and measure error as SVD terms are truncated
583
18
225,431
def get_anki_phrases(lang='english', limit=None):
    """Retrieve as many anki paired-statement corpora as you can for the requested language."""
    lang = lang.strip().lower()[:3]
    if lang not in ANKI_LANGUAGES:
        lang = LANG2ANKI[lang[:2]]
    if lang[:2] == 'en':
        return get_anki_phrases_english(limit=limit)
    # Non-English: last column of the corpus holds the target-language phrases.
    return sorted(get_data(lang).iloc[:, -1].str.strip().values)
Retrieve as many anki paired - statement corpora as you can for the requested language
117
18
225,432
def get_anki_phrases_english(limit=None):
    """Return all the English phrases in the Anki translation flashcards."""
    texts = set()
    for lang in ANKI_LANGUAGES:
        df = get_data(lang)
        texts |= set(df.eng.str.strip().values)
        if limit and len(texts) >= limit:
            break
    return sorted(texts)
Return all the English phrases in the Anki translation flashcards
81
12
225,433
# Build a DataFrame of every unique (word, pos, tag, dep, ent_type, ent_iob,
# sentiment) tuple across the spaCy docs provided.  A single Doc is wrapped
# in a list and handled recursively.
def get_vocab ( docs ) : if isinstance ( docs , spacy . tokens . doc . Doc ) : return get_vocab ( [ docs ] ) vocab = set ( ) for doc in tqdm ( docs ) : for tok in doc : vocab . add ( ( tok . text , tok . pos_ , tok . tag_ , tok . dep_ , tok . ent_type_ , tok . ent_iob_ , tok . sentiment ) ) # TODO: add ent type info and other flags, e.g. like_url, like_email, etc return pd . DataFrame ( sorted ( vocab ) , columns = 'word pos tag dep ent_type ent_iob sentiment' . split ( ) )
Build a DataFrame containing all the words in the docs provided along with their POS tags etc
168
18
225,434
def get_word_vectors(vocab):
    """Create a word2vec embedding matrix for all the words in the vocab.

    Rows align with `vocab` order; each word is tried verbatim, lowercased,
    and with its final character dropped (crude singularization).  Words not
    found at all are left as zero vectors and logged.
    """
    wv = get_data('word2vec')
    # FIX: np.array(nrows, ncols) is invalid (the 2nd positional argument is
    # a dtype) — allocate a zeroed (len(vocab), embedding_dim) matrix instead.
    vectors = np.zeros((len(vocab), len(wv['the'])))
    for i, tok in enumerate(vocab):
        word = tok[0]
        variations = (word, word.lower(), word.lower()[:-1])
        for w in variations:
            if w in wv:
                vectors[i, :] = wv[w]
        if not np.sum(np.abs(vectors[i])):
            logger.warning('Unable to find {}, {}, or {} in word2vec.'.format(*variations))
    return vectors
Create a word2vec embedding matrix for all the words in the vocab
159
16
225,435
# Get all the vocab words + tags + word vectors for the tokens in the Anki
# translation corpus, optionally persisting them to BIGDATA_PATH/filename.
# NOTE(review): assigning the 2-D get_word_vectors() matrix to a single
# DataFrame column looks suspect — confirm pandas accepts it as intended.
# NOTE(review): `lang` default is a mutable list and is forwarded to nlp() —
# verify nlp() accepts a list here.
def get_anki_vocab ( lang = [ 'eng' ] , limit = None , filename = 'anki_en_vocabulary.csv' ) : texts = get_anki_phrases ( lang = lang , limit = limit ) docs = nlp ( texts , lang = lang ) vocab = get_vocab ( docs ) vocab [ 'vector' ] = get_word_vectors ( vocab ) # TODO: turn this into a KeyedVectors object if filename : vocab . to_csv ( os . path . join ( BIGDATA_PATH , filename ) ) return vocab
Get all the vocab words + tags + wordvectors for the tokens in the Anki translation corpus
135
21
225,436
def wc(f, verbose=False, nrows=None):
    r"""Count lines in a text file (optionally capped at `nrows`).

    `f` may be anything `ensure_open` accepts (a path or a file-like object).
    A tqdm progress bar is shown when verbose=True.
    """
    tqdm_prog = tqdm if verbose else no_tqdm
    i = -1  # FIX: an empty file left `i` unbound, raising UnboundLocalError
    with ensure_open(f, mode='r') as fin:
        for i, line in tqdm_prog(enumerate(fin)):
            if nrows is not None and i >= nrows - 1:
                break
    return i + 1
r Count lines in a text file
97
7
225,437
def normalize_filepath(filepath):
    r"""Lowercase the filename and ext, expanding extensions like .tgz to .tar.gz."""
    filename = os.path.basename(filepath)
    dirpath = filepath[:-len(filename)]
    cre_controlspace = re.compile(r'[\t\r\n\f]+')
    new_filename = cre_controlspace.sub('', filename)
    if new_filename != filename:
        logger.warning('Stripping whitespace from filename: {} => {}'.format(repr(filename), repr(new_filename)))
        filename = new_filename
    filename = normalize_ext(filename.lower())
    if dirpath:
        dirpath = dirpath[:-1]  # get rid of the trailing os.path.sep
        return os.path.join(dirpath, filename)
    return filename
r Lowercase the filename and ext expanding extensions like . tgz to . tar . gz .
176
20
225,438
def find_filepath(filename, basepaths=(os.path.curdir, DATA_PATH, BIGDATA_PATH, BASE_DIR, '~', '~/Downloads', os.path.join('/', 'tmp'), '..')):
    """Given a filename or path, see if it exists in any of the common places datafiles might be.

    Returns the first existing (expanded) full path, or False if not found.
    """
    if os.path.isfile(filename):
        return filename
    for basedir in basepaths:
        candidate = expand_filepath(os.path.join(basedir, filename))
        if os.path.isfile(candidate):
            return candidate
    return False
Given a filename or path see if it exists in any of the common places datafiles might be
122
19
225,439
def close(self):
    """Shut down, closing any open connections in the pool (idempotent)."""
    if self._closed:
        return
    self._closed = True
    pool = self._pool
    if pool is not None:
        pool.close()
        self._pool = None
Shut down closing any open connections in the pool .
42
10
225,440
def hydrate_point(srid, *coordinates):
    """Create a new instance of a Point subclass from a raw set of fields.

    The subclass is chosen by SRID; an unknown SRID falls back to a generic
    Point.  Raises ValueError when the coordinate count does not match the
    dimensionality registered for the SRID.
    """
    if srid not in __srid_table:
        generic = Point(coordinates)
        generic.srid = srid
        return generic
    point_class, dim = __srid_table[srid]
    if len(coordinates) != dim:
        raise ValueError("SRID %d requires %d coordinates (%d provided)" % (srid, dim, len(coordinates)))
    return point_class(coordinates)
Create a new instance of a Point subclass from a raw set of fields . The subclass chosen is determined by the given SRID ; a ValueError will be raised if no such subclass can be found .
96
40
225,441
def dehydrate_point(value):
    """Dehydrator for Point data.

    2-D points map to a b"X" structure, 3-D points to b"Y"; any other
    dimensionality is rejected.
    """
    dim = len(value)
    markers = {2: b"X", 3: b"Y"}
    if dim not in markers:
        raise ValueError("Cannot dehydrate Point with %d dimensions" % dim)
    return Structure(markers[dim], value.srid, *value)
Dehydrator for Point data .
78
7
225,442
# Convert native values into PackStream values, returning a tuple.  The
# inner dehydrate_ first consults self.dehydration_functions (per-type custom
# dehydrators), then handles the native scalar/collection types; the
# isinstance order matters (bool before int, bytes after str).  Ints must fit
# in a signed 64-bit range; bytes require self.supports_bytes; dict keys must
# be strings.  Anything unrecognized raises TypeError.
def dehydrate ( self , values ) : def dehydrate_ ( obj ) : try : f = self . dehydration_functions [ type ( obj ) ] except KeyError : pass else : return f ( obj ) if obj is None : return None elif isinstance ( obj , bool ) : return obj elif isinstance ( obj , int ) : if INT64_MIN <= obj <= INT64_MAX : return obj raise ValueError ( "Integer out of bounds (64-bit signed integer values only)" ) elif isinstance ( obj , float ) : return obj elif isinstance ( obj , str ) : return obj elif isinstance ( obj , ( bytes , bytearray ) ) : # order is important here - bytes must be checked after string if self . supports_bytes : return obj else : raise TypeError ( "This PackSteam channel does not support BYTES (consider upgrading to Neo4j 3.2+)" ) elif isinstance ( obj , ( list , map_type ) ) : return list ( map ( dehydrate_ , obj ) ) elif isinstance ( obj , dict ) : if any ( not isinstance ( key , str ) for key in obj . keys ( ) ) : raise TypeError ( "Non-string dictionary keys are not supported" ) return { key : dehydrate_ ( value ) for key , value in obj . items ( ) } else : raise TypeError ( obj ) return tuple ( map ( dehydrate_ , values ) )
Convert native values into PackStream values .
315
9
225,443
def get(self, key, default=None):
    """Obtain a value from the record by key, returning `default` if the key does not exist."""
    try:
        index = self.__keys.index(str(key))
    except ValueError:
        return default
    if 0 <= index < len(self):
        return super(Record, self).__getitem__(index)
    return default
Obtain a value from the record by key returning a default value if the key does not exist .
63
20
225,444
def index(self, key):
    """Return the index of the given item.

    :param key: an integer index or a string key
    :raise IndexError: for an out-of-range integer
    :raise KeyError: for an unknown string key
    :raise TypeError: for any other key type
    """
    if isinstance(key, int):
        if 0 <= key < len(self.__keys):
            return key
        raise IndexError(key)
    if isinstance(key, str):
        try:
            return self.__keys.index(key)
        except ValueError:
            raise KeyError(key)
    raise TypeError(key)
Return the index of the given item .
78
8
225,445
def value(self, key=0, default=None):
    """Obtain a single value from the record by index or key.

    If no index or key is specified, the first value is returned. If
    the specified item does not exist, the default value is returned.
    """
    try:
        i = self.index(key)
    except (IndexError, KeyError):
        return default
    return self[i]
Obtain a single value from the record by index or key . If no index or key is specified the first value is returned . If the specified item does not exist the default value is returned .
42
39
225,446
def values(self, *keys):
    """Return the values of the record, optionally filtering to
    include only certain values by index or key.

    Unknown string keys yield None in the corresponding position.
    """
    if not keys:
        return list(self)
    selected = []
    for key in keys:
        try:
            i = self.index(key)
        except KeyError:
            selected.append(None)
        else:
            selected.append(self[i])
    return selected
Return the values of the record optionally filtering to include only certain values by index or key .
59
18
225,447
def items(self, *keys):
    """Return the fields of the record as a list of key and value tuples.

    Unknown string keys produce a ``(key, None)`` pair.
    """
    if keys:
        pairs = []
        for key in keys:
            try:
                i = self.index(key)
            except KeyError:
                pairs.append((key, None))
            else:
                pairs.append((self.__keys[i], self[i]))
        return pairs
    return [(self.__keys[i], super(Record, self).__getitem__(i))
            for i in range(len(self))]
Return the fields of the record as a list of key and value tuples
106
15
225,448
def _make_plan(plan_dict):
    """Construct a Plan or ProfiledPlan from a dictionary of metadata values.

    A ProfiledPlan is built when profiling counters ("dbHits"/"rows")
    are present in the metadata; otherwise a plain Plan is returned.
    Children are converted recursively.
    """
    operator_type = plan_dict["operatorType"]
    identifiers = plan_dict.get("identifiers", [])
    arguments = plan_dict.get("args", [])
    children = [_make_plan(child) for child in plan_dict.get("children", [])]
    if "dbHits" not in plan_dict and "rows" not in plan_dict:
        return Plan(operator_type, identifiers, arguments, children)
    db_hits = plan_dict.get("dbHits", 0)
    rows = plan_dict.get("rows", 0)
    return ProfiledPlan(operator_type, identifiers, arguments, children, db_hits, rows)
Construct a Plan or ProfiledPlan from a dictionary of metadata values .
171
14
225,449
def unit_of_work(metadata=None, timeout=None):
    """Decorator for transaction functions that allows extra control
    over how the transaction is carried out.

    The decorated function keeps its original ``__name__``/``__doc__``
    (via functools.wraps) and gains ``metadata`` and ``timeout``
    attributes that the driver reads when running the transaction.

    :param metadata: a dict of metadata to attach to the transaction
    :param timeout: a transaction timeout
    """
    from functools import wraps

    def wrapper(f):
        @wraps(f)  # fix: the plain wrapper hid f's name/docstring
        def wrapped(*args, **kwargs):
            return f(*args, **kwargs)
        wrapped.metadata = metadata
        wrapped.timeout = timeout
        return wrapped
    return wrapper
This function is a decorator for transaction functions that allows extra control over how the transaction is carried out .
61
21
225,450
def close(self):
    """Close the session.

    This will release any borrowed resources, such as connections,
    and will roll back any outstanding transactions.
    """
    from neobolt.exceptions import ConnectionExpired, CypherError, ServiceUnavailable
    try:
        if self.has_transaction():
            try:
                self.rollback_transaction()
            except (CypherError, TransactionError, SessionError,
                    ConnectionExpired, ServiceUnavailable):
                # best effort rollback - the session closes regardless
                pass
    finally:
        self._closed = True
        self._disconnect(sync=True)
Close the session . This will release any borrowed resources such as connections and will roll back any outstanding transactions .
87
21
225,451
def run(self, statement, parameters=None, **kwparameters):
    """Run a Cypher statement within an auto-commit transaction.

    :param statement: a statement string or Statement instance
    :param parameters: a dict of statement parameters
    :param kwparameters: additional parameters as keyword arguments
    :return: a BoltStatementResult
    :raise ValueError: on an empty statement, or when metadata/timeout
        are attached while an explicit transaction is open
    :raise TypeError: when statement is not a str or Statement
    """
    from neobolt.exceptions import ConnectionExpired
    self._assert_open()
    if not statement:
        raise ValueError("Cannot run an empty statement")
    if not isinstance(statement, (str, Statement)):
        raise TypeError("Statement must be a string or a Statement instance")
    if not self._connection:
        self._connect()
    cx = self._connection
    protocol_version = cx.protocol_version
    server = cx.server
    has_transaction = self.has_transaction()
    statement_text = str(statement)
    statement_metadata = getattr(statement, "metadata", None)
    statement_timeout = getattr(statement, "timeout", None)
    parameters = fix_parameters(dict(parameters or {}, **kwparameters),
                                protocol_version,
                                supports_bytes=server.supports("bytes"))

    def fail(_):
        self._close_transaction()

    hydrant = PackStreamHydrator(protocol_version)
    result_metadata = {
        "statement": statement_text,
        "parameters": parameters,
        "server": server,
        "protocol_version": protocol_version,
    }
    run_metadata = {
        "metadata": statement_metadata,
        "timeout": statement_timeout,
        "on_success": result_metadata.update,
        "on_failure": fail,
    }

    def done(summary_metadata):
        result_metadata.update(summary_metadata)
        bookmark = result_metadata.get("bookmark")
        if bookmark:
            self._bookmarks_in = tuple([bookmark])
            self._bookmark_out = bookmark

    self._last_result = result = BoltStatementResult(self, hydrant, result_metadata)
    if has_transaction:
        # metadata/timeout belong to the transaction, not the statement
        if statement_metadata:
            raise ValueError("Metadata can only be attached at transaction level")
        if statement_timeout:
            raise ValueError("Timeouts only apply at transaction level")
    else:
        run_metadata["bookmarks"] = self._bookmarks_in
    cx.run(statement_text, parameters, **run_metadata)
    cx.pull_all(
        on_records=lambda records: result._records.extend(
            hydrant.hydrate_records(result.keys(), records)),
        on_success=done,
        on_failure=fail,
        on_summary=lambda: result.detach(sync=False),
    )
    if not has_transaction:
        try:
            self._connection.send()
            self._connection.fetch()
        except ConnectionExpired as error:
            raise SessionExpired(*error.args)
    return result
Run a Cypher statement within an auto - commit transaction .
589
12
225,452
def send(self):
    """Send all outstanding requests."""
    from neobolt.exceptions import ConnectionExpired
    if not self._connection:
        return
    try:
        self._connection.send()
    except ConnectionExpired as error:
        raise SessionExpired(*error.args)
Send all outstanding requests .
49
5
225,453
def fetch(self):
    """Attempt to fetch at least one more record.

    :returns: number of records fetched (0 when no connection exists)
    """
    from neobolt.exceptions import ConnectionExpired
    if not self._connection:
        return 0
    try:
        detail_count, _ = self._connection.fetch()
    except ConnectionExpired as error:
        raise SessionExpired(*error.args)
    return detail_count
Attempt to fetch at least one more record .
63
9
225,454
def detach(self, result, sync=True):
    """Detach a result from this session by fetching and buffering
    any remaining records.

    :param result: the result to detach
    :param sync: when True, flush and drain remaining records first
    :returns: number of records fetched
    """
    fetched = 0
    if sync and result.attached():
        self.send()
        fetch = self.fetch  # hoist the bound method for the drain loop
        while result.attached():
            fetched += fetch()
    if self._last_result is result:
        self._last_result = None
        if not self.has_transaction():
            self._disconnect(sync=False)
    result._session = None
    return fetched
Detach a result from this session by fetching and buffering any remaining records .
93
17
225,455
def run(self, statement, parameters=None, **kwparameters):
    """Run a Cypher statement within the context of this transaction.

    Delegates to the owning session after checking the transaction is
    still open.
    """
    self._assert_open()
    return self.session.run(statement, parameters, **kwparameters)
Run a Cypher statement within the context of this transaction .
45
12
225,456
def detach(self, sync=True):
    """Detach this result from its parent session by fetching the
    remainder of this result from the network into the buffer.

    :returns: number of records fetched (0 if already detached)
    """
    if not self.attached():
        return 0
    return self._session.detach(self, sync=sync)
Detach this result from its parent session by fetching the remainder of this result from the network into the buffer .
35
23
225,457
def keys(self):
    """The keys for the records in this result.

    Blocks, fetching from the network if necessary, until the "fields"
    metadata arrives; returns None if it never does.
    """
    try:
        return self._metadata["fields"]
    except KeyError:
        if self.attached():
            self._session.send()
        while self.attached() and "fields" not in self._metadata:
            self._session.fetch()
        return self._metadata.get("fields")
The keys for the records in this result .
74
9
225,458
def records(self):
    """Generator for records obtained from this result.

    Yields buffered records first, then keeps fetching from the
    network while the result remains attached.
    """
    buffered = self._records
    pop_left = buffered.popleft
    while buffered:
        yield pop_left()
    is_attached = self.attached
    if is_attached():
        self._session.send()
    while is_attached():
        self._session.fetch()
        while buffered:
            yield pop_left()
Generator for records obtained from this result .
71
9
225,459
def summary(self):
    """Obtain the summary of this result, buffering any remaining
    records.

    The summary is built lazily on first access and cached.
    """
    self.detach()
    if self._summary is None:
        self._summary = BoltStatementResultSummary(**self._metadata)
    return self._summary
Obtain the summary of this result buffering any remaining records .
41
13
225,460
def single(self):
    """Obtain the next and only remaining record from this result.

    A warning is generated if more than one record is available, but
    the first of those is still returned. Returns None if the result
    is empty.
    """
    remaining = list(self)
    if not remaining:
        return None
    if len(remaining) != 1:
        warn("Expected a result with a single record, but this result contains %d" % len(remaining))
    return remaining[0]
Obtain the next and only remaining record from this result .
57
12
225,461
def peek(self):
    """Obtain the next record from this result without consuming it.

    This leaves the record in the buffer for further processing.
    Returns None when no record is available.
    """
    buffered = self._records
    if buffered:
        return buffered[0]
    if not self.attached():
        return None
    if self.attached():
        self._session.send()
    while self.attached() and not buffered:
        self._session.fetch()
    if buffered:
        return buffered[0]
    return None
Obtain the next record from this result without consuming it . This leaves the record in the buffer for further processing .
74
23
225,462
def value(self, item=0, default=None):
    """Return the remainder of the result as a list of values.

    :param item: field to select from each remaining record
    :param default: value to use when a record lacks the field
    """
    return [rec.value(item, default) for rec in self.records()]
Return the remainder of the result as a list of values .
33
12
225,463
def pull(self):
    """Returns a generator containing the results of the next query
    in the pipeline.

    n.b. pull is now somewhat misleadingly named because it doesn't do
    anything - the connection isn't touched until the returned
    generator is iterated.
    """
    if not self._pull_lock.acquire(blocking=False):
        raise PullOrderException()
    return self._results_generator()
Returns a generator containing the results of the next query in the pipeline
81
13
225,464
def deprecated(message):
    """Decorator for deprecating functions and methods.

    Calling the decorated function emits a DeprecationWarning with the
    given message. Uses functools.wraps instead of manually copying
    ``__name__``/``__doc__``/``__dict__`` so ``__module__``,
    ``__qualname__`` and ``__wrapped__`` are preserved as well.

    :param message: the deprecation message to emit
    """
    from functools import wraps

    def f__(f):
        @wraps(f)
        def f_(*args, **kwargs):
            from warnings import warn
            warn(message, category=DeprecationWarning, stacklevel=2)
            return f(*args, **kwargs)
        return f_
    return f__
Decorator for deprecating functions and methods .
104
11
225,465
def experimental(message):
    """Decorator for tagging experimental functions and methods.

    Calling the decorated function emits an ExperimentalWarning with
    the given message. Uses functools.wraps instead of manually
    copying ``__name__``/``__doc__``/``__dict__`` so ``__module__``,
    ``__qualname__`` and ``__wrapped__`` are preserved as well.

    :param message: the warning message to emit
    """
    from functools import wraps

    def f__(f):
        @wraps(f)
        def f_(*args, **kwargs):
            from warnings import warn
            warn(message, category=ExperimentalWarning, stacklevel=2)
            return f(*args, **kwargs)
        return f_
    return f__
Decorator for tagging experimental functions and methods .
102
10
225,466
def hydrate_time(nanoseconds, tz=None):
    """Hydrator for Time and LocalTime values.

    :param nanoseconds: nanoseconds since midnight
    :param tz: optional timezone offset in seconds
    :return: a Time value (localized when tz is given)
    """
    seconds, nanoseconds = map(int, divmod(nanoseconds, 1000000000))
    minutes, seconds = map(int, divmod(seconds, 60))
    hours, minutes = map(int, divmod(minutes, 60))
    # fold the sub-second part back into a float seconds component
    seconds = (1000000000 * seconds + nanoseconds) / 1000000000
    t = Time(hours, minutes, seconds)
    if tz is None:
        return t
    tz_offset_minutes, tz_offset_seconds = divmod(tz, 60)
    zone = FixedOffset(tz_offset_minutes)
    return zone.localize(t)
Hydrator for Time and LocalTime values .
150
9
225,467
def dehydrate_time(value):
    """Dehydrator for time values.

    :param value: a neotime.Time or datetime.time value
    :return: a PackStream Structure (b"T" with offset, b"t" without)
    :raise TypeError: for any other value type
    """
    if isinstance(value, Time):
        nanoseconds = int(value.ticks * 1000000000)
    elif isinstance(value, time):
        nanoseconds = (3600000000000 * value.hour
                       + 60000000000 * value.minute
                       + 1000000000 * value.second
                       + 1000 * value.microsecond)
    else:
        raise TypeError("Value must be a neotime.Time or a datetime.time")
    if value.tzinfo:
        return Structure(b"T", nanoseconds, value.tzinfo.utcoffset(value).seconds)
    return Structure(b"t", nanoseconds)
Dehydrator for time values .
153
7
225,468
def hydrate_datetime(seconds, nanoseconds, tz=None):
    """Hydrator for DateTime and LocalDateTime values.

    :param seconds: seconds since the Unix epoch
    :param nanoseconds: sub-second component in nanoseconds
    :param tz: optional timezone - offset seconds (int) or zone name (str)
    :return: a DateTime value (localized when tz is given)
    """
    minutes, seconds = map(int, divmod(seconds, 60))
    hours, minutes = map(int, divmod(minutes, 60))
    days, hours = map(int, divmod(hours, 24))
    # fold the sub-second part back into a float seconds component
    seconds = (1000000000 * seconds + nanoseconds) / 1000000000
    t = DateTime.combine(Date.from_ordinal(UNIX_EPOCH_DATE_ORDINAL + days),
                         Time(hours, minutes, seconds))
    if tz is None:
        return t
    if isinstance(tz, int):
        tz_offset_minutes, tz_offset_seconds = divmod(tz, 60)
        zone = FixedOffset(tz_offset_minutes)
    else:
        zone = timezone(tz)
    return zone.localize(t)
Hydrator for DateTime and LocalDateTime values .
195
11
225,469
def dehydrate_datetime(value):
    """Dehydrator for datetime values.

    :param value: a datetime or DateTime value
    :return: a PackStream Structure - b"d" (no zone), b"f" (named
        zone) or b"F" (fixed offset)
    """
    def seconds_and_nanoseconds(dt):
        if isinstance(dt, datetime):
            dt = DateTime.from_native(dt)
        zone_epoch = DateTime(1970, 1, 1, tzinfo=dt.tzinfo)
        t = dt.to_clock_time() - zone_epoch.to_clock_time()
        return t.seconds, t.nanoseconds

    tz = value.tzinfo
    if tz is None:
        # without time zone - treat as UTC
        value = utc.localize(value)
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"d", seconds, nanoseconds)
    if hasattr(tz, "zone") and tz.zone:
        # with named time zone
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"f", seconds, nanoseconds, tz.zone)
    # with time offset
    seconds, nanoseconds = seconds_and_nanoseconds(value)
    return Structure(b"F", seconds, nanoseconds, tz.utcoffset(value).seconds)
Dehydrator for datetime values .
283
8
225,470
def hydrate_duration(months, days, seconds, nanoseconds):
    """Hydrator for Duration values.

    :return: a Duration built from the four components
    """
    return Duration(months=months, days=days,
                    seconds=seconds, nanoseconds=nanoseconds)
Hydrator for Duration values .
43
6
225,471
def dehydrate_duration(value):
    """Dehydrator for duration values.

    :param value: a Duration value
    :return: a PackStream Structure (b"E")
    """
    return Structure(b"E", value.months, value.days, value.seconds,
                     int(1000000000 * value.subseconds))
Dehydrator for duration values .
40
7
225,472
def dehydrate_timedelta(value):
    """Dehydrator for timedelta values.

    :param value: a datetime.timedelta value
    :return: a PackStream Structure (b"E") with a zero months component
    """
    return Structure(b"E", 0, value.days, value.seconds,
                     1000 * value.microseconds)
Dehydrator for timedelta values .
54
8
225,473
def zoom(self, locator, percent="200%", steps=1):
    """Zooms in on an element a certain amount."""
    driver = self._current_application()
    target = self._element_find(locator, True, True)
    driver.zoom(element=target, percent=percent, steps=steps)
Zooms in on an element a certain amount .
61
11
225,474
def scroll(self, start_locator, end_locator):
    """Scrolls from one element to another.

    Key attributes for arbitrary elements are `id` and `name`. See
    introduction for details about locating elements.
    """
    start_element = self._element_find(start_locator, True, True)
    end_element = self._element_find(end_locator, True, True)
    driver = self._current_application()
    driver.scroll(start_element, end_element)
Scrolls from one element to another Key attributes for arbitrary elements are id and name . See introduction for details about locating elements .
74
25
225,475
def scroll_up(self, locator):
    """Scrolls up to element."""
    driver = self._current_application()
    target = self._element_find(locator, True, True)
    driver.execute_script("mobile: scroll", {"direction": 'up', 'element': target.id})
Scrolls up to element
67
5
225,476
def long_press(self, locator, duration=1000):
    """Long press the element, with optional duration (milliseconds)."""
    driver = self._current_application()
    target = self._element_find(locator, True, True)
    TouchAction(driver).press(target).wait(duration).release().perform()
Long press the element with optional duration
67
7
225,477
def click_a_point(self, x=0, y=0, duration=100):
    """Click on a point.

    :param x: x-coordinate of the point (default 0)
    :param y: y-coordinate of the point (default 0)
    :param duration: press duration in milliseconds (default 100)
    :raise AssertionError: if the tap could not be performed
    """
    self._info("Clicking on a point (%s,%s)." % (x, y))
    driver = self._current_application()
    action = TouchAction(driver)
    try:
        action.press(x=float(x), y=float(y)).wait(float(duration)).release().perform()
    except Exception:
        # fix: was a bare `except:` with `assert False`, which both
        # swallows KeyboardInterrupt/SystemExit and is stripped when
        # Python runs with -O; raise explicitly instead
        raise AssertionError("Can't click on a point at (%s,%s)" % (x, y))
Click on a point
124
4
225,478
def click_element_at_coordinates(self, coordinate_X, coordinate_Y):
    """click element at a certain coordinate"""
    self._info("Pressing at (%s, %s)." % (coordinate_X, coordinate_Y))
    driver = self._current_application()
    TouchAction(driver).press(x=coordinate_X, y=coordinate_Y).release().perform()
click element at a certain coordinate
88
6
225,479
def wait_until_element_is_visible(self, locator, timeout=None, error=None):
    """Waits until element specified with `locator` is visible."""
    def check_visibility():
        # None means the locator matched nothing; False means not visible
        visible = self._is_visible(locator)
        if visible:
            return
        if visible is None:
            return error or "Element locator '%s' did not match any elements after %s" % (locator, self._format_timeout(timeout))
        return error or "Element '%s' was not visible in %s" % (locator, self._format_timeout(timeout))
    self._wait_until_no_error(timeout, check_visibility)
Waits until element specified with locator is visible .
141
11
225,480
def wait_until_page_contains(self, text, timeout=None, error=None):
    """Waits until `text` appears on current page."""
    error = error or "Text '%s' did not appear in <TIMEOUT>" % text
    self._wait_until(timeout, error, self._is_text_present, text)
Waits until text appears on current page .
67
9
225,481
def wait_until_page_does_not_contain(self, text, timeout=None, error=None):
    """Waits until `text` disappears from current page."""
    def check_present():
        if self._is_text_present(text):
            return error or "Text '%s' did not disappear in %s" % (text, self._format_timeout(timeout))
        return None
    self._wait_until_no_error(timeout, check_present)
Waits until text disappears from current page .
101
9
225,482
def wait_until_page_contains_element(self, locator, timeout=None, error=None):
    """Waits until element specified with `locator` appears on current page."""
    error = error or "Element '%s' did not appear in <TIMEOUT>" % locator
    self._wait_until(timeout, error, self._is_element_present, locator)
Waits until element specified with locator appears on current page .
72
13
225,483
def wait_until_page_does_not_contain_element(self, locator, timeout=None, error=None):
    """Waits until element specified with `locator` disappears from current page."""
    def check_present():
        if self._is_element_present(locator):
            return error or "Element '%s' did not disappear in %s" % (locator, self._format_timeout(timeout))
        return None
    self._wait_until_no_error(timeout, check_present)
Waits until element specified with locator disappears from current page .
106
13
225,484
def set_network_connection_status(self, connectionStatus):
    """Sets the network connection Status.

    :param connectionStatus: the status value, coerced to int before
        being passed to the driver
    """
    driver = self._current_application()
    return driver.set_network_connection(int(connectionStatus))
Sets the network connection Status .
40
7
225,485
def pull_file(self, path, decode=False):
    """Retrieves the file at `path` and returns its content.

    :param path: the path to the file on the device
    :param decode: when True, base64-decode the data before returning
    """
    driver = self._current_application()
    file_data = driver.pull_file(path)
    if decode:
        file_data = base64.b64decode(file_data)
    return str(file_data)
Retrieves the file at path and returns its content .
58
13
225,486
def pull_folder(self, path, decode=False):
    """Retrieves a folder at `path`. Returns the folder's contents zipped.

    :param path: the path to the folder on the device
    :param decode: when True, base64-decode the data before returning
    """
    driver = self._current_application()
    folder_data = driver.pull_folder(path)
    if decode:
        folder_data = base64.b64decode(folder_data)
    return folder_data
Retrieves a folder at path . Returns the folder s contents zipped .
55
16
225,487
def push_file(self, path, data, encode=False):
    """Puts the data in the file specified as `path`.

    :param path: destination path on the device
    :param data: the file contents
    :param encode: when True, base64-encode the data before pushing
    """
    driver = self._current_application()
    payload = to_bytes(data)
    if encode:
        payload = base64.b64encode(payload)
    driver.push_file(path, payload)
Puts the data in the file specified as path .
59
11
225,488
def start_activity(self, appPackage, appActivity, **opts):
    """Opens an arbitrary activity during a test.

    If the activity belongs to another application, that application
    is started and the activity is opened.
    """
    # Almost the same code as in appium's start activity,
    # just to keep the same keyword names as in open application arguments
    key_map = {
        'app_wait_package': 'appWaitPackage',
        'app_wait_activity': 'appWaitActivity',
        'intent_action': 'intentAction',
        'intent_category': 'intentCategory',
        'intent_flags': 'intentFlags',
        'optional_intent_arguments': 'optionalIntentArguments',
        'stop_app_on_reset': 'stopAppOnReset',
    }
    data = {snake: opts[camel] for snake, camel in key_map.items() if camel in opts}
    driver = self._current_application()
    driver.start_activity(app_package=appPackage, app_activity=appActivity, **data)
Opens an arbitrary activity during a test . If the activity belongs to another application that application is started and the activity is opened .
208
26
225,489
def install_app(self, app_path, app_package):
    """Install App via Appium. Android only.

    :param app_path: path to the application package
    :param app_package: package id used to verify installation
    :return: True if the app reports as installed afterwards
    """
    driver = self._current_application()
    driver.install_app(app_path)
    return driver.is_app_installed(app_package)
Install App via Appium Android only .
49
8
225,490
def click_element(self, locator):
    """Click element identified by `locator`.

    Key attributes for arbitrary elements are `index` and `name`. See
    introduction for details about locating elements.
    """
    self._info("Clicking element '%s'." % locator)
    target = self._element_find(locator, True, True)
    target.click()
Click element identified by locator . Key attributes for arbitrary elements are index and name . See introduction for details about locating elements .
46
25
225,491
def click_text(self, text, exact_match=False):
    """Click text identified by `text`.

    By default tries to click the first text that involves the given
    `text`; set `exact_match` to True to require an exact match.
    """
    target = self._element_find_by_text(text, exact_match)
    target.click()
Click text identified by text . By default tries to click first text involves given text if you would like to click exactly matching text then set exact_match to True . If there are multiple use of text and you do not want first one use locator with Get Web Elements instead .
37
56
225,492
def input_text(self, locator, text):
    """Types the given `text` into text field identified by `locator`.

    See introduction for details about locating elements.
    """
    self._info("Typing text '%s' into text field '%s'" % (text, locator))
    self._element_input_text_by_locator(locator, text)
Types the given text into text field identified by locator . See introduction for details about locating elements .
60
20
225,493
def input_password(self, locator, text):
    """Types the given password into text field identified by `locator`.

    Unlike Input Text, this keyword does not log the given password.
    See introduction for details about locating elements.
    """
    self._info("Typing password into text field '%s'" % locator)
    self._element_input_text_by_locator(locator, text)
Types the given password into text field identified by locator . Difference between this keyword and Input Text is that this keyword does not log the given password . See introduction for details about locating elements .
52
38
225,494
def input_value(self, locator, text):
    """Sets the given value into text field identified by `locator`.

    This is an iOS-only keyword; input value makes use of set_value.
    See introduction for details about locating elements.
    """
    self._info("Setting text '%s' into text field '%s'" % (text, locator))
    self._element_input_value_by_locator(locator, text)
Sets the given value into text field identified by locator . This is an IOS only keyword input value makes use of set_value See introduction for details about locating elements .
59
36
225,495
def page_should_contain_text(self, text, loglevel='INFO'):
    """Verifies that current page contains `text`.

    If this keyword fails, it automatically logs the page source using
    the log level specified with the optional `loglevel` argument.
    Giving NONE as level disables logging.
    """
    if self._is_text_present(text):
        self._info("Current page contains text '%s'." % text)
        return
    self.log_source(loglevel)
    raise AssertionError("Page should have contained text '%s' "
                         "but did not" % text)
Verifies that current page contains text . If this keyword fails it automatically logs the page source using the log level specified with the optional loglevel argument . Giving NONE as level disables logging .
89
40
225,496
def page_should_not_contain_text(self, text, loglevel='INFO'):
    """Verifies that current page does not contain `text`.

    If this keyword fails, it automatically logs the page source using
    the log level specified with the optional `loglevel` argument.
    Giving NONE as level disables logging.
    """
    if self._is_text_present(text):
        self.log_source(loglevel)
        raise AssertionError("Page should not have contained text '%s'" % text)
    self._info("Current page does not contains text '%s'." % text)
Verifies that current page not contains text . If this keyword fails it automatically logs the page source using the log level specified with the optional loglevel argument . Giving NONE as level disables logging .
87
41
225,497
def page_should_contain_element(self, locator, loglevel='INFO'):
    """Verifies that current page contains `locator` element.

    If this keyword fails, it automatically logs the page source using
    the log level specified with the optional `loglevel` argument.
    Giving NONE as level disables logging.
    """
    if self._is_element_present(locator):
        self._info("Current page contains element '%s'." % locator)
        return
    self.log_source(loglevel)
    raise AssertionError("Page should have contained element '%s' "
                         "but did not" % locator)
Verifies that current page contains locator element . If this keyword fails it automatically logs the page source using the log level specified with the optional loglevel argument . Giving NONE as level disables logging .
93
42
225,498
def page_should_not_contain_element(self, locator, loglevel='INFO'):
    """Verifies that current page does not contain `locator` element.

    If this keyword fails, it automatically logs the page source using
    the log level specified with the optional `loglevel` argument.
    Giving NONE as level disables logging.
    """
    if self._is_element_present(locator):
        self.log_source(loglevel)
        raise AssertionError("Page should not have contained element '%s'" % locator)
    self._info("Current page not contains element '%s'." % locator)
Verifies that current page not contains locator element . If this keyword fails it automatically logs the page source using the log level specified with the optional loglevel argument . Giving NONE as level disables logging .
90
43
225,499
def element_should_be_disabled(self, locator, loglevel='INFO'):
    """Verifies that element identified with `locator` is disabled.

    Key attributes for arbitrary elements are `id` and `name`. See
    introduction for details about locating elements.
    """
    if self._element_find(locator, True, True).is_enabled():
        self.log_source(loglevel)
        raise AssertionError("Element '%s' should be disabled "
                             "but did not" % locator)
    self._info("Element '%s' is disabled ." % locator)
Verifies that element identified with locator is disabled . Key attributes for arbitrary elements are id and name . See introduction for details about locating elements .
98
29