idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
235,700
def get_open_fds ( ) : pid = os . getpid ( ) procs = subprocess . check_output ( [ "lsof" , '-w' , '-Ff' , "-p" , str ( pid ) ] ) procs = procs . decode ( "utf-8" ) return len ( [ s for s in procs . split ( '\n' ) if s and s [ 0 ] == 'f' and s [ 1 : ] . isdigit ( ) ] )
Return the number of open file descriptors for current process
112
11
235,701
def acquire ( self ) : start_time = time . time ( ) while True : try : self . fd = os . open ( self . lockfile , os . O_CREAT | os . O_EXCL | os . O_RDWR ) break except ( OSError , ) as e : if e . errno != errno . EEXIST : raise if ( time . time ( ) - start_time ) >= self . timeout : raise FileLockException ( "%s: Timeout occured." % self . lockfile ) time . sleep ( self . delay ) self . is_locked = True
Acquire the lock if possible . If the lock is in use it check again every delay seconds . It does this until it either gets the lock or exceeds timeout number of seconds in which case it throws an exception .
132
43
235,702
def release ( self ) : if self . is_locked : os . close ( self . fd ) os . unlink ( self . lockfile ) self . is_locked = False
Get rid of the lock by deleting the lockfile . When working in a with statement this gets automatically called at the end .
39
25
235,703
def filter ( self , names ) : names = list_strings ( names ) fnames = [ ] for f in names : for pat in self . pats : if fnmatch . fnmatch ( f , pat ) : fnames . append ( f ) return fnames
Returns a list with the names matching the pattern .
56
10
235,704
def match ( self , name ) : for pat in self . pats : if fnmatch . fnmatch ( name , pat ) : return True return False
Returns True if name matches one of the patterns .
32
10
235,705
def deprecated ( replacement = None , message = None ) : def wrap ( old ) : def wrapped ( * args , * * kwargs ) : msg = "%s is deprecated" % old . __name__ if replacement is not None : if isinstance ( replacement , property ) : r = replacement . fget elif isinstance ( replacement , ( classmethod , staticmethod ) ) : r = replacement . __func__ else : r = replacement msg += "; use %s in %s instead." % ( r . __name__ , r . __module__ ) if message is not None : msg += "\n" + message warnings . simplefilter ( 'default' ) warnings . warn ( msg , DeprecationWarning , stacklevel = 2 ) return old ( * args , * * kwargs ) return wrapped return wrap
Decorator to mark classes or functions as deprecated with a possible replacement .
173
15
235,706
def install_excepthook ( hook_type = "color" , * * kwargs ) : try : from IPython . core import ultratb except ImportError : import warnings warnings . warn ( "Cannot install excepthook, IPyhon.core.ultratb not available" ) return 1 # Select the hook. hook = dict ( color = ultratb . ColorTB , verbose = ultratb . VerboseTB , ) . get ( hook_type . lower ( ) , None ) if hook is None : return 2 import sys sys . excepthook = hook ( * * kwargs ) return 0
This function replaces the original python traceback with an improved version from Ipython . Use color for colourful traceback formatting verbose for Ka - Ping Yee s cgitb . py version kwargs are the keyword arguments passed to the constructor . See IPython . core . ultratb . py for more info .
138
65
235,707
def regrep ( filename , patterns , reverse = False , terminate_on_match = False , postprocess = str ) : compiled = { k : re . compile ( v ) for k , v in patterns . items ( ) } matches = collections . defaultdict ( list ) gen = reverse_readfile ( filename ) if reverse else zopen ( filename , "rt" ) for i , l in enumerate ( gen ) : for k , p in compiled . items ( ) : m = p . search ( l ) if m : matches [ k ] . append ( [ [ postprocess ( g ) for g in m . groups ( ) ] , - i if reverse else i ] ) if terminate_on_match and all ( [ len ( matches . get ( k , [ ] ) ) for k in compiled . keys ( ) ] ) : break try : # Try to close open file handle. Pass if it is a generator. gen . close ( ) except : pass return matches
A powerful regular expression version of grep .
204
8
235,708
def cached_class ( klass ) : cache = { } @ wraps ( klass , assigned = ( "__name__" , "__module__" ) , updated = ( ) ) class _decorated ( klass ) : # The wraps decorator can't do this because __doc__ # isn't writable once the class is created __doc__ = klass . __doc__ def __new__ ( cls , * args , * * kwargs ) : key = ( cls , ) + args + tuple ( kwargs . items ( ) ) try : inst = cache . get ( key , None ) except TypeError : # Can't cache this set of arguments inst = key = None if inst is None : # Technically this is cheating, but it works, # and takes care of initializing the instance # (so we can override __init__ below safely); # calling up to klass.__new__ would be the # "official" way to create the instance, but # that raises DeprecationWarning if there are # args or kwargs and klass does not override # __new__ (which most classes don't), because # object.__new__ takes no parameters (and in # Python 3 the warning will become an error) inst = klass ( * args , * * kwargs ) # This makes isinstance and issubclass work # properly inst . __class__ = cls if key is not None : cache [ key ] = inst return inst def __init__ ( self , * args , * * kwargs ) : # This will be called every time __new__ is # called, so we skip initializing here and do # it only when the instance is created above pass return _decorated
Decorator to cache class instances by constructor arguments . This results in a class that behaves like a singleton for each set of constructor arguments ensuring efficiency .
369
31
235,709
def operator_from_str ( op ) : d = { "==" : operator . eq , "!=" : operator . ne , ">" : operator . gt , ">=" : operator . ge , "<" : operator . lt , "<=" : operator . le , '+' : operator . add , '-' : operator . sub , '*' : operator . mul , '%' : operator . mod , '^' : operator . xor , } try : d [ '/' ] = operator . truediv except AttributeError : pass return d [ op ]
Return the operator associated to the given string op .
125
10
235,710
def run ( self , timeout = None , * * kwargs ) : from subprocess import Popen , PIPE def target ( * * kw ) : try : # print('Thread started') self . process = Popen ( self . command , * * kw ) self . output , self . error = self . process . communicate ( ) self . retcode = self . process . returncode # print('Thread stopped') except : import traceback self . error = traceback . format_exc ( ) self . retcode = - 1 # default stdout and stderr if 'stdout' not in kwargs : kwargs [ 'stdout' ] = PIPE if 'stderr' not in kwargs : kwargs [ 'stderr' ] = PIPE # thread import threading thread = threading . Thread ( target = target , kwargs = kwargs ) thread . start ( ) thread . join ( timeout ) if thread . is_alive ( ) : # print("Terminating process") self . process . terminate ( ) self . killed = True thread . join ( ) return self
Run a command in a separated thread and wait timeout seconds . kwargs are keyword arguments passed to Popen .
244
23
235,711
def marquee ( text = "" , width = 78 , mark = '*' ) : if not text : return ( mark * width ) [ : width ] nmark = ( width - len ( text ) - 2 ) // len ( mark ) // 2 if nmark < 0 : nmark = 0 marks = mark * nmark return '%s %s %s' % ( marks , text , marks )
Return the input string centered in a marquee .
85
9
235,712
def boxed ( msg , ch = "=" , pad = 5 ) : if pad > 0 : msg = pad * ch + " " + msg . strip ( ) + " " + pad * ch return "\n" . join ( [ len ( msg ) * ch , msg , len ( msg ) * ch , ] )
Returns a string in a box
67
6
235,713
def indent ( lines , amount , ch = ' ' ) : padding = amount * ch return padding + ( '\n' + padding ) . join ( lines . split ( '\n' ) )
Indent the lines in a string by padding each one with proper number of pad characters
42
17
235,714
def prof_main ( main ) : @ wraps ( main ) def wrapper ( * args , * * kwargs ) : import sys try : do_prof = sys . argv [ 1 ] == "prof" if do_prof : sys . argv . pop ( 1 ) except Exception : do_prof = False if not do_prof : sys . exit ( main ( ) ) else : print ( "Entering profiling mode..." ) import pstats , cProfile , tempfile prof_file = kwargs . get ( "prof_file" , None ) if prof_file is None : _ , prof_file = tempfile . mkstemp ( ) print ( "Profiling data stored in %s" % prof_file ) sortby = kwargs . get ( "sortby" , "time" ) cProfile . runctx ( "main()" , globals ( ) , locals ( ) , prof_file ) s = pstats . Stats ( prof_file ) s . strip_dirs ( ) . sort_stats ( sortby ) . print_stats ( ) if "retval" not in kwargs : sys . exit ( 0 ) else : return kwargs [ "retval" ] return wrapper
Decorator for profiling main programs .
262
8
235,715
def invalidate ( cls , inst , name ) : inst_cls = inst . __class__ if not hasattr ( inst , '__dict__' ) : raise AttributeError ( "'%s' object has no attribute '__dict__'" % ( inst_cls . __name__ , ) ) if name . startswith ( '__' ) and not name . endswith ( '__' ) : name = '_%s%s' % ( inst_cls . __name__ , name ) if not isinstance ( getattr ( inst_cls , name ) , cls ) : raise AttributeError ( "'%s.%s' is not a %s attribute" % ( inst_cls . __name__ , name , cls . __name__ ) ) if name in inst . __dict__ : del inst . __dict__ [ name ]
Invalidate a lazy attribute .
191
6
235,716
def as_set ( obj ) : if obj is None or isinstance ( obj , collections . Set ) : return obj if not isinstance ( obj , collections . Iterable ) : return set ( ( obj , ) ) else : return set ( obj )
Convert obj into a set returns None if obj is None .
53
13
235,717
def logged ( level = logging . DEBUG ) : def wrap ( f ) : _logger = logging . getLogger ( "{}.{}" . format ( f . __module__ , f . __name__ ) ) def wrapped_f ( * args , * * kwargs ) : _logger . log ( level , "Called at {} with args = {} and kwargs = {}" . format ( datetime . datetime . now ( ) , args , kwargs ) ) data = f ( * args , * * kwargs ) _logger . log ( level , "Done at {} with args = {} and kwargs = {}" . format ( datetime . datetime . now ( ) , args , kwargs ) ) return data return wrapped_f return wrap
Useful logging decorator . If a method is logged the beginning and end of the method call will be logged at a pre - specified level .
169
29
235,718
def enable_logging ( main ) : @ functools . wraps ( main ) def wrapper ( * args , * * kwargs ) : import argparse parser = argparse . ArgumentParser ( ) parser . add_argument ( '--loglevel' , default = "ERROR" , type = str , help = "Set the loglevel. Possible values: CRITICAL, ERROR (default)," "WARNING, INFO, DEBUG" ) options = parser . parse_args ( ) # loglevel is bound to the string value obtained from the command line # argument. # Convert to upper case to allow the user to specify --loglevel=DEBUG # or --loglevel=debug numeric_level = getattr ( logging , options . loglevel . upper ( ) , None ) if not isinstance ( numeric_level , int ) : raise ValueError ( 'Invalid log level: %s' % options . loglevel ) logging . basicConfig ( level = numeric_level ) retcode = main ( * args , * * kwargs ) return retcode return wrapper
This decorator is used to decorate main functions . It adds the initialization of the logger and an argument parser that allows one to select the loglevel . Useful if we are writing simple main functions that call libraries where the logging module is used
228
49
235,719
def which ( cmd ) : def is_exe ( fp ) : return os . path . isfile ( fp ) and os . access ( fp , os . X_OK ) fpath , fname = os . path . split ( cmd ) if fpath : if is_exe ( cmd ) : return cmd else : for path in os . environ [ "PATH" ] . split ( os . pathsep ) : exe_file = os . path . join ( path , cmd ) if is_exe ( exe_file ) : return exe_file return None
Returns full path to a executable .
125
7
235,720
def all_subclasses ( cls ) : subclasses = cls . __subclasses__ ( ) return subclasses + [ g for s in subclasses for g in all_subclasses ( s ) ]
Given a class cls this recursive function returns a list with all subclasses subclasses of subclasses and so on .
44
24
235,721
def find_top_pyfile ( ) : import os frame = currentframe ( ) while True : if frame . f_back is None : finfo = getframeinfo ( frame ) #print(getframeinfo(frame)) return os . path . abspath ( finfo . filename ) frame = frame . f_back
This function inspects the Cpython frame to find the path of the script .
68
16
235,722
def pprint_table ( table , out = sys . stdout , rstrip = False ) : def max_width_col ( table , col_idx ) : """ Get the maximum width of the given column index """ return max ( [ len ( row [ col_idx ] ) for row in table ] ) if rstrip : for row_idx , row in enumerate ( table ) : table [ row_idx ] = [ c . rstrip ( ) for c in row ] col_paddings = [ ] ncols = len ( table [ 0 ] ) for i in range ( ncols ) : col_paddings . append ( max_width_col ( table , i ) ) for row in table : # left col out . write ( row [ 0 ] . ljust ( col_paddings [ 0 ] + 1 ) ) # rest of the cols for i in range ( 1 , len ( row ) ) : col = row [ i ] . rjust ( col_paddings [ i ] + 2 ) out . write ( col ) out . write ( "\n" )
Prints out a table of data padded for alignment Each row must have the same number of columns .
239
20
235,723
def gcd ( * numbers ) : n = numbers [ 0 ] for i in numbers : n = pygcd ( n , i ) return n
Returns the greatest common divisor for a sequence of numbers .
31
13
235,724
def lcm ( * numbers ) : n = 1 for i in numbers : n = ( i * n ) // gcd ( i , n ) return n
Return lowest common multiple of a sequence of numbers .
33
10
235,725
def gcd_float ( numbers , tol = 1e-8 ) : def pair_gcd_tol ( a , b ) : """Calculate the Greatest Common Divisor of a and b. Unless b==0, the result will have the same sign as b (so that when b is divided by it, the result comes out positive). """ while b > tol : a , b = b , a % b return a n = numbers [ 0 ] for i in numbers : n = pair_gcd_tol ( n , i ) return n
Returns the greatest common divisor for a sequence of numbers . Uses a numerical tolerance so can be used on floats
121
23
235,726
def chunks ( items , n ) : it = iter ( items ) chunk = tuple ( itertools . islice ( it , n ) ) while chunk : yield chunk chunk = tuple ( itertools . islice ( it , n ) )
Yield successive n - sized chunks from a list - like object .
53
14
235,727
def iterator_from_slice ( s ) : import numpy as np start = s . start if s . start is not None else 0 step = s . step if s . step is not None else 1 if s . stop is None : # Infinite iterator. return itertools . count ( start = start , step = step ) else : # xrange-like iterator that supports float. return iter ( np . arange ( start , s . stop , step ) )
Constructs an iterator given a slice object s .
98
10
235,728
def colored_map ( text , cmap ) : if not __ISON : return text for key , v in cmap . items ( ) : if isinstance ( v , dict ) : text = text . replace ( key , colored ( key , * * v ) ) else : text = text . replace ( key , colored ( key , color = v ) ) return text
Return colorized text . cmap is a dict mapping tokens to color options .
77
16
235,729
def cprint_map ( text , cmap , * * kwargs ) : try : print ( colored_map ( text , cmap ) , * * kwargs ) except TypeError : # flush is not supported by py2.7 kwargs . pop ( "flush" , None ) print ( colored_map ( text , cmap ) , * * kwargs )
Print colorize text . cmap is a dict mapping keys to color options . kwargs are passed to print function
82
24
235,730
def as_dict ( self ) : d = { "@module" : self . __class__ . __module__ , "@class" : self . __class__ . __name__ } try : parent_module = self . __class__ . __module__ . split ( '.' ) [ 0 ] module_version = import_module ( parent_module ) . __version__ d [ "@version" ] = u"{}" . format ( module_version ) except AttributeError : d [ "@version" ] = None args = getargspec ( self . __class__ . __init__ ) . args def recursive_as_dict ( obj ) : if isinstance ( obj , ( list , tuple ) ) : return [ recursive_as_dict ( it ) for it in obj ] elif isinstance ( obj , dict ) : return { kk : recursive_as_dict ( vv ) for kk , vv in obj . items ( ) } elif hasattr ( obj , "as_dict" ) : return obj . as_dict ( ) return obj for c in args : if c != "self" : try : a = self . __getattribute__ ( c ) except AttributeError : try : a = self . __getattribute__ ( "_" + c ) except AttributeError : raise NotImplementedError ( "Unable to automatically determine as_dict " "format from class. MSONAble requires all " "args to be present as either self.argname or " "self._argname, and kwargs to be present under" "a self.kwargs variable to automatically " "determine the dict format. Alternatively, " "you can implement both as_dict and from_dict." ) d [ c ] = recursive_as_dict ( a ) if hasattr ( self , "kwargs" ) : d . update ( * * self . kwargs ) if hasattr ( self , "_kwargs" ) : d . update ( * * self . _kwargs ) return d
A JSON serializable dict representation of an object .
433
10
235,731
def process_decoded ( self , d ) : if isinstance ( d , dict ) : if "@module" in d and "@class" in d : modname = d [ "@module" ] classname = d [ "@class" ] else : modname = None classname = None if modname and modname not in [ "bson.objectid" , "numpy" ] : if modname == "datetime" and classname == "datetime" : try : dt = datetime . datetime . strptime ( d [ "string" ] , "%Y-%m-%d %H:%M:%S.%f" ) except ValueError : dt = datetime . datetime . strptime ( d [ "string" ] , "%Y-%m-%d %H:%M:%S" ) return dt mod = __import__ ( modname , globals ( ) , locals ( ) , [ classname ] , 0 ) if hasattr ( mod , classname ) : cls_ = getattr ( mod , classname ) data = { k : v for k , v in d . items ( ) if not k . startswith ( "@" ) } if hasattr ( cls_ , "from_dict" ) : return cls_ . from_dict ( data ) elif np is not None and modname == "numpy" and classname == "array" : return np . array ( d [ "data" ] , dtype = d [ "dtype" ] ) elif ( bson is not None ) and modname == "bson.objectid" and classname == "ObjectId" : return bson . objectid . ObjectId ( d [ "oid" ] ) return { self . process_decoded ( k ) : self . process_decoded ( v ) for k , v in d . items ( ) } elif isinstance ( d , list ) : return [ self . process_decoded ( x ) for x in d ] return d
Recursive method to support decoding dicts and lists containing pymatgen objects .
441
17
235,732
def nCr ( n , r ) : f = math . factorial return int ( f ( n ) / f ( r ) / f ( n - r ) )
Calculates nCr .
35
6
235,733
def nPr ( n , r ) : f = math . factorial return int ( f ( n ) / f ( n - r ) )
Calculates nPr .
30
6
235,734
def copy_r ( src , dst ) : abssrc = os . path . abspath ( src ) absdst = os . path . abspath ( dst ) try : os . makedirs ( absdst ) except OSError : # If absdst exists, an OSError is raised. We ignore this error. pass for f in os . listdir ( abssrc ) : fpath = os . path . join ( abssrc , f ) if os . path . isfile ( fpath ) : shutil . copy ( fpath , absdst ) elif not absdst . startswith ( fpath ) : copy_r ( fpath , os . path . join ( absdst , f ) ) else : warnings . warn ( "Cannot copy %s to itself" % fpath )
Implements a recursive copy function similar to Unix s cp - r command . Surprisingly python does not have a real equivalent . shutil . copytree only works if the destination directory is not present .
181
40
235,735
def gzip_dir ( path , compresslevel = 6 ) : for f in os . listdir ( path ) : full_f = os . path . join ( path , f ) if not f . lower ( ) . endswith ( "gz" ) : with open ( full_f , 'rb' ) as f_in , GzipFile ( '{}.gz' . format ( full_f ) , 'wb' , compresslevel = compresslevel ) as f_out : shutil . copyfileobj ( f_in , f_out ) shutil . copystat ( full_f , '{}.gz' . format ( full_f ) ) os . remove ( full_f )
Gzips all files in a directory . Note that this is different from shutil . make_archive which creates a tar archive . The aim of this method is to create gzipped files that can still be read using common Unix - style commands like zless or zcat .
150
57
235,736
def compress_file ( filepath , compression = "gz" ) : if compression not in [ "gz" , "bz2" ] : raise ValueError ( "Supported compression formats are 'gz' and 'bz2'." ) from monty . io import zopen if not filepath . lower ( ) . endswith ( ".%s" % compression ) : with open ( filepath , 'rb' ) as f_in , zopen ( '%s.%s' % ( filepath , compression ) , 'wb' ) as f_out : f_out . writelines ( f_in ) os . remove ( filepath )
Compresses a file with the correct extension . Functions like standard Unix command line gzip and bzip2 in the sense that the original uncompressed files are not retained .
139
34
235,737
def compress_dir ( path , compression = "gz" ) : for parent , subdirs , files in os . walk ( path ) : for f in files : compress_file ( os . path . join ( parent , f ) , compression = compression )
Recursively compresses all files in a directory . Note that this compresses all files singly i . e . it does not create a tar archive . For that just use Python tarfile class .
54
41
235,738
def decompress_file ( filepath ) : toks = filepath . split ( "." ) file_ext = toks [ - 1 ] . upper ( ) from monty . io import zopen if file_ext in [ "BZ2" , "GZ" , "Z" ] : with open ( "." . join ( toks [ 0 : - 1 ] ) , 'wb' ) as f_out , zopen ( filepath , 'rb' ) as f_in : f_out . writelines ( f_in ) os . remove ( filepath )
Decompresses a file with the correct extension . Automatically detects gz bz2 or z extension .
125
22
235,739
def decompress_dir ( path ) : for parent , subdirs , files in os . walk ( path ) : for f in files : decompress_file ( os . path . join ( parent , f ) )
Recursively decompresses all files in a directory .
46
11
235,740
def remove ( path , follow_symlink = False ) : if os . path . isfile ( path ) : os . remove ( path ) elif os . path . islink ( path ) : if follow_symlink : remove ( os . readlink ( path ) ) os . unlink ( path ) else : shutil . rmtree ( path )
Implements an remove function that will delete files folder trees and symlink trees
78
17
235,741
def compute_hashes ( obj , hashes = frozenset ( [ 'md5' ] ) ) : if not ( hasattr ( obj , 'read' ) or isinstance ( obj , bytes ) ) : raise ValueError ( "Cannot compute hash for given input: a file-like object or bytes-like object is required" ) hashers = dict ( ) for alg in hashes : try : hashers [ alg ] = hashlib . new ( alg . lower ( ) ) except ValueError : logging . warning ( "Unable to validate file contents using unknown hash algorithm: %s" , alg ) while True : if hasattr ( obj , 'read' ) : block = obj . read ( 1024 ** 2 ) else : block = obj obj = None if not block : break for i in hashers . values ( ) : i . update ( block ) hashes = dict ( ) for alg , h in hashers . items ( ) : digest = h . hexdigest ( ) base64digest = base64 . b64encode ( h . digest ( ) ) # base64.b64encode returns str on python 2.7 and bytes on 3.x, so deal with that and always return a str if not isinstance ( base64digest , str ) and isinstance ( base64digest , bytes ) : base64digest = base64digest . decode ( 'ascii' ) hashes [ alg ] = digest hashes [ alg + "_base64" ] = base64digest return hashes
Digests input data read from file - like object fd or passed directly as bytes - like object . Compute hashes for multiple algorithms . Default is MD5 . Returns a tuple of a hex - encoded digest string and a base64 - encoded value suitable for an HTTP header .
328
56
235,742
def compute_file_hashes ( file_path , hashes = frozenset ( [ 'md5' ] ) ) : if not os . path . exists ( file_path ) : logging . warning ( "%s does not exist" % file_path ) return else : logging . debug ( "Computing [%s] hashes for file [%s]" % ( ',' . join ( hashes ) , file_path ) ) try : with open ( file_path , 'rb' ) as fd : return compute_hashes ( fd , hashes ) except ( IOError , OSError ) as e : logging . warning ( "Error while calculating digest(s) for file %s: %s" % ( file_path , str ( e ) ) ) raise
Digests data read from file denoted by file_path .
165
13
235,743
def validate ( self , processes = 1 , fast = False , completeness_only = False , callback = None ) : self . _validate_structure ( ) self . _validate_bagittxt ( ) self . _validate_fetch ( ) self . _validate_contents ( processes = processes , fast = fast , completeness_only = completeness_only , callback = callback ) return True
Checks the structure and contents are valid .
89
9
235,744
def _validate_fetch ( self ) : for url , file_size , filename in self . fetch_entries ( ) : # fetch_entries will raise a BagError for unsafe filenames # so at this point we will check only that the URL is minimally # well formed: parsed_url = urlparse ( url ) # only check for a scheme component since per the spec the URL field is actually a URI per # RFC3986 (https://tools.ietf.org/html/rfc3986) if not all ( parsed_url . scheme ) : raise BagError ( _ ( 'Malformed URL in fetch.txt: %s' ) % url )
Validate the fetch . txt file
145
8
235,745
def _validate_completeness ( self ) : errors = list ( ) # First we'll make sure there's no mismatch between the filesystem # and the list of files in the manifest(s) only_in_manifests , only_on_fs , only_in_fetch = self . compare_manifests_with_fs_and_fetch ( ) for path in only_in_manifests : e = FileMissing ( path ) LOGGER . warning ( force_unicode ( e ) ) errors . append ( e ) for path in only_on_fs : e = UnexpectedFile ( path ) LOGGER . warning ( force_unicode ( e ) ) errors . append ( e ) for path in only_in_fetch : e = UnexpectedRemoteFile ( path ) # this is non-fatal according to spec but the warning is still reasonable LOGGER . warning ( force_unicode ( e ) ) if errors : raise BagValidationError ( _ ( "Bag validation failed" ) , errors )
Verify that the actual file manifests match the files in the data directory
223
14
235,746
def get_detection_results ( url , timeout , metadata = False , save_har = False ) : plugins = load_plugins ( ) if not plugins : raise NoPluginsError ( 'No plugins found' ) logger . debug ( '[+] Starting detection with %(n)d plugins' , { 'n' : len ( plugins ) } ) response = get_response ( url , plugins , timeout ) # Save HAR if save_har : fd , path = tempfile . mkstemp ( suffix = '.har' ) logger . info ( f'Saving HAR file to {path}' ) with open ( fd , 'w' ) as f : json . dump ( response [ 'har' ] , f ) det = Detector ( response , plugins , url ) softwares = det . get_results ( metadata = metadata ) output = { 'url' : url , 'softwares' : softwares , } return output
Return results from detector .
202
5
235,747
def get_plugins ( metadata ) : plugins = load_plugins ( ) if not plugins : raise NoPluginsError ( 'No plugins found' ) results = [ ] for p in sorted ( plugins . get_all ( ) , key = attrgetter ( 'name' ) ) : if metadata : data = { 'name' : p . name , 'homepage' : p . homepage } hints = getattr ( p , 'hints' , [ ] ) if hints : data [ 'hints' ] = hints results . append ( data ) else : results . append ( p . name ) return results
Return the registered plugins .
129
5
235,748
def get_most_complete_pm ( pms ) : if not pms : return None selected_version = None selected_presence = None for pm in pms : if pm . version : if not selected_version : selected_version = pm else : if len ( pm . version ) > len ( selected_version . version ) : selected_version = pm elif pm . presence : selected_presence = pm return selected_version or selected_presence
Return plugin match with longer version if not available will return plugin match with presence = True
98
17
235,749
def docker_container ( ) : if SETUP_SPLASH : dm = DockerManager ( ) dm . start_container ( ) try : requests . post ( f'{SPLASH_URL}/_gc' ) except requests . exceptions . RequestException : pass yield
Start the Splash server on a Docker container . If the container doesn t exist it is created and named splash - detectem .
59
25
235,750
def is_url_allowed ( url ) : blacklist = [ r'\.ttf' , r'\.woff' , r'fonts\.googleapis\.com' , r'\.png' , r'\.jpe?g' , r'\.gif' , r'\.svg' ] for ft in blacklist : if re . search ( ft , url ) : return False return True
Return True if url is not in blacklist .
86
9
235,751
def is_valid_mimetype ( response ) : blacklist = [ 'image/' , ] mimetype = response . get ( 'mimeType' ) if not mimetype : return True for bw in blacklist : if bw in mimetype : return False return True
Return True if the mimetype is not blacklisted .
62
12
235,752
def get_charset ( response ) : # Set default charset charset = DEFAULT_CHARSET m = re . findall ( r';charset=(.*)' , response . get ( 'mimeType' , '' ) ) if m : charset = m [ 0 ] return charset
Return charset from response or default charset .
66
10
235,753
def create_lua_script ( plugins ) : lua_template = pkg_resources . resource_string ( 'detectem' , 'script.lua' ) template = Template ( lua_template . decode ( 'utf-8' ) ) javascript_data = to_javascript_data ( plugins ) return template . substitute ( js_data = json . dumps ( javascript_data ) )
Return script template filled up with plugin javascript data .
84
10
235,754
def to_javascript_data ( plugins ) : def escape ( v ) : return re . sub ( r'"' , r'\\"' , v ) def dom_matchers ( p ) : dom_matchers = p . get_matchers ( 'dom' ) escaped_dom_matchers = [ ] for dm in dom_matchers : check_statement , version_statement = dm escaped_dom_matchers . append ( { 'check_statement' : escape ( check_statement ) , # Escape '' and not None 'version_statement' : escape ( version_statement or '' ) , } ) return escaped_dom_matchers return [ { 'name' : p . name , 'matchers' : dom_matchers ( p ) } for p in plugins . with_dom_matchers ( ) ]
Return a dictionary with all JavaScript matchers . Quotes are escaped .
176
14
235,755
def get_response ( url , plugins , timeout = SPLASH_TIMEOUT ) : lua_script = create_lua_script ( plugins ) lua = urllib . parse . quote_plus ( lua_script ) page_url = f'{SPLASH_URL}/execute?url={url}&timeout={timeout}&lua_source={lua}' try : with docker_container ( ) : logger . debug ( '[+] Sending request to Splash instance' ) res = requests . get ( page_url ) except requests . exceptions . ConnectionError : raise SplashError ( "Could not connect to Splash server {}" . format ( SPLASH_URL ) ) logger . debug ( '[+] Response received' ) json_data = res . json ( ) if res . status_code in ERROR_STATUS_CODES : raise SplashError ( get_splash_error ( json_data ) ) softwares = json_data [ 'softwares' ] scripts = json_data [ 'scripts' ] . values ( ) har = get_valid_har ( json_data [ 'har' ] ) js_error = get_evaljs_error ( json_data ) if js_error : logger . debug ( '[+] WARNING: failed to eval JS matchers: %(n)s' , { 'n' : js_error } ) else : logger . debug ( '[+] Detected %(n)d softwares from the DOM' , { 'n' : len ( softwares ) } ) logger . debug ( '[+] Detected %(n)d scripts from the DOM' , { 'n' : len ( scripts ) } ) logger . debug ( '[+] Final HAR has %(n)d valid entries' , { 'n' : len ( har ) } ) return { 'har' : har , 'scripts' : scripts , 'softwares' : softwares }
Return response with HAR inline scritps and software detected by JS matchers .
415
16
235,756
def get_valid_har ( har_data ) : new_entries = [ ] entries = har_data . get ( 'log' , { } ) . get ( 'entries' , [ ] ) logger . debug ( '[+] Detected %(n)d entries in HAR' , { 'n' : len ( entries ) } ) for entry in entries : url = entry [ 'request' ] [ 'url' ] if not is_url_allowed ( url ) : continue response = entry [ 'response' ] [ 'content' ] if not is_valid_mimetype ( response ) : continue if response . get ( 'text' ) : charset = get_charset ( response ) response [ 'text' ] = base64 . b64decode ( response [ 'text' ] ) . decode ( charset ) else : response [ 'text' ] = '' new_entries . append ( entry ) logger . debug ( '[+] Added URL: %(url)s ...' , { 'url' : url [ : 100 ] } ) return new_entries
Return list of valid HAR entries .
235
7
235,757
def _script_to_har_entry ( cls , script , url ) : entry = { 'request' : { 'url' : url } , 'response' : { 'url' : url , 'content' : { 'text' : script } } } cls . _set_entry_type ( entry , INLINE_SCRIPT_ENTRY ) return entry
Return entry for embed script
80
5
235,758
def mark_entries ( self , entries ) : for entry in entries : self . _set_entry_type ( entry , RESOURCE_ENTRY ) # If first entry doesn't have a redirect, set is as main entry main_entry = entries [ 0 ] main_location = self . _get_location ( main_entry ) if not main_location : self . _set_entry_type ( main_entry , MAIN_ENTRY ) return # Resolve redirected URL and see if it's in the rest of entries main_url = urllib . parse . urljoin ( get_url ( main_entry ) , main_location ) for entry in entries [ 1 : ] : url = get_url ( entry ) if url == main_url : self . _set_entry_type ( entry , MAIN_ENTRY ) break else : # In fail case, set the first entry self . _set_entry_type ( main_entry , MAIN_ENTRY )
Mark one entry as main entry and the rest as resource entry .
210
13
235,759
def get_hints ( self , plugin ) : hints = [ ] for hint_name in getattr ( plugin , 'hints' , [ ] ) : hint_plugin = self . _plugins . get ( hint_name ) if hint_plugin : hint_result = Result ( name = hint_plugin . name , homepage = hint_plugin . homepage , from_url = self . requested_url , type = HINT_TYPE , plugin = plugin . name , ) hints . append ( hint_result ) logger . debug ( f'{plugin.name} & hint {hint_result.name} detected' ) else : logger . error ( f'{plugin.name} hints an invalid plugin: {hint_name}' ) return hints
Return plugin hints from plugin .
160
6
235,760
def process_from_splash ( self ) : for software in self . _softwares_from_splash : plugin = self . _plugins . get ( software [ 'name' ] ) # Determine if it's a version or presence result try : additional_data = { 'version' : software [ 'version' ] } except KeyError : additional_data = { 'type' : INDICATOR_TYPE } self . _results . add_result ( Result ( name = plugin . name , homepage = plugin . homepage , from_url = self . requested_url , plugin = plugin . name , * * additional_data , ) ) for hint in self . get_hints ( plugin ) : self . _results . add_result ( hint )
Add softwares found in the DOM
161
8
235,761
def process_har ( self ) : hints = [ ] version_plugins = self . _plugins . with_version_matchers ( ) generic_plugins = self . _plugins . with_generic_matchers ( ) for entry in self . har : for plugin in version_plugins : pm = self . apply_plugin_matchers ( plugin , entry ) if not pm : continue # Set name if matchers could detect modular name if pm . name : name = '{}-{}' . format ( plugin . name , pm . name ) else : name = plugin . name if pm . version : self . _results . add_result ( Result ( name = name , version = pm . version , homepage = plugin . homepage , from_url = get_url ( entry ) , plugin = plugin . name , ) ) elif pm . presence : # Try to get version through file hashes version = get_version_via_file_hashes ( plugin , entry ) if version : self . _results . add_result ( Result ( name = name , version = version , homepage = plugin . homepage , from_url = get_url ( entry ) , plugin = plugin . name , ) ) else : self . _results . add_result ( Result ( name = name , homepage = plugin . homepage , from_url = get_url ( entry ) , type = INDICATOR_TYPE , plugin = plugin . name , ) ) hints += self . get_hints ( plugin ) for plugin in generic_plugins : pm = self . apply_plugin_matchers ( plugin , entry ) if not pm : continue plugin_data = plugin . get_information ( entry ) # Only add to results if it's a valid result if 'name' in plugin_data : self . _results . add_result ( Result ( name = plugin_data [ 'name' ] , homepage = plugin_data [ 'homepage' ] , from_url = get_url ( entry ) , type = GENERIC_TYPE , plugin = plugin . name , ) ) hints += self . get_hints ( plugin ) for hint in hints : self . _results . add_result ( hint )
Detect plugins present in the page .
459
7
235,762
def get_results ( self , metadata = False ) : results_data = [ ] self . process_har ( ) self . process_from_splash ( ) for rt in sorted ( self . _results . get_results ( ) ) : rdict = { 'name' : rt . name } if rt . version : rdict [ 'version' ] = rt . version if metadata : rdict [ 'homepage' ] = rt . homepage rdict [ 'type' ] = rt . type rdict [ 'from_url' ] = rt . from_url rdict [ 'plugin' ] = rt . plugin results_data . append ( rdict ) return results_data
Return results of the analysis .
153
6
235,763
def load_plugins ( ) : loader = _PluginLoader ( ) for pkg in PLUGIN_PACKAGES : loader . load_plugins ( pkg ) return loader . plugins
Return the list of plugin instances .
39
7
235,764
def _get_plugin_module_paths ( self , plugin_dir ) : filepaths = [ fp for fp in glob . glob ( '{}/**/*.py' . format ( plugin_dir ) , recursive = True ) if not fp . endswith ( '__init__.py' ) ] rel_paths = [ re . sub ( plugin_dir . rstrip ( '/' ) + '/' , '' , fp ) for fp in filepaths ] module_paths = [ rp . replace ( '/' , '.' ) . replace ( '.py' , '' ) for rp in rel_paths ] return module_paths
Return a list of every module in plugin_dir .
148
11
235,765
def load_plugins ( self , plugins_package ) : try : # Resolve directory in the filesystem plugin_dir = find_spec ( plugins_package ) . submodule_search_locations [ 0 ] except ImportError : logger . error ( "Could not load plugins package '%(pkg)s'" , { 'pkg' : plugins_package } ) return for module_path in self . _get_plugin_module_paths ( plugin_dir ) : # Load the module dynamically spec = find_spec ( '{}.{}' . format ( plugins_package , module_path ) ) m = module_from_spec ( spec ) spec . loader . exec_module ( m ) # Get classes from module and extract the plugin classes classes = inspect . getmembers ( m , predicate = inspect . isclass ) for _ , klass in classes : # Avoid imports processing if klass . __module__ != spec . name : continue # Avoid classes not ending in Plugin if not klass . __name__ . endswith ( 'Plugin' ) : continue instance = klass ( ) if self . _is_plugin_ok ( instance ) : self . plugins . add ( instance )
Load plugins from plugins_package module .
252
8
235,766
def extract_named_group ( text , named_group , matchers , return_presence = False ) : presence = False for matcher in matchers : if isinstance ( matcher , str ) : v = re . search ( matcher , text , flags = re . DOTALL ) if v : dict_result = v . groupdict ( ) try : return dict_result [ named_group ] except KeyError : if dict_result : # It's other named group matching, discard continue else : # It's a matcher without named_group # but we can't return it until every matcher pass # because a following matcher could have a named group presence = True elif callable ( matcher ) : v = matcher ( text ) if v : return v if return_presence and presence : return 'presence' return None
Return named_group match from text reached by using a matcher from matchers .
179
17
235,767
def parsed ( self ) : return BeautifulSoup ( self . response . content , features = self . browser . parser , )
Lazily parse response content using HTML parser specified by the browser .
26
14
235,768
def _build_send_args ( self , * * kwargs ) : out = { } out . update ( self . _default_send_args ) out . update ( kwargs ) return out
Merge optional arguments with defaults .
44
7
235,769
def open ( self , url , method = 'get' , * * kwargs ) : response = self . session . request ( method , url , * * self . _build_send_args ( * * kwargs ) ) self . _update_state ( response )
Open a URL .
59
4
235,770
def _update_state ( self , response ) : # Clear trailing states self . _states = self . _states [ : self . _cursor + 1 ] # Append new state state = RoboState ( self , response ) self . _states . append ( state ) self . _cursor += 1 # Clear leading states if self . _maxlen : decrement = len ( self . _states ) - self . _maxlen if decrement > 0 : self . _states = self . _states [ decrement : ] self . _cursor -= decrement
Update the state of the browser . Create a new state object and append to or overwrite the browser s state history .
119
23
235,771
def _traverse ( self , n = 1 ) : if not self . history : raise exceptions . RoboError ( 'Not tracking history' ) cursor = self . _cursor + n if cursor >= len ( self . _states ) or cursor < 0 : raise exceptions . RoboError ( 'Index out of range' ) self . _cursor = cursor
Traverse state history . Used by back and forward methods .
74
12
235,772
def get_link ( self , text = None , * args , * * kwargs ) : return helpers . find ( self . parsed , _link_ptn , text = text , * args , * * kwargs )
Find an anchor or button by containing text as well as standard BeautifulSoup arguments .
49
17
235,773
def get_links ( self , text = None , * args , * * kwargs ) : return helpers . find_all ( self . parsed , _link_ptn , text = text , * args , * * kwargs )
Find anchors or buttons by containing text as well as standard BeautifulSoup arguments .
51
16
235,774
def get_form ( self , id = None , * args , * * kwargs ) : if id : kwargs [ 'id' ] = id form = self . find ( _form_ptn , * args , * * kwargs ) if form is not None : return Form ( form )
Find form by ID as well as standard BeautifulSoup arguments .
66
13
235,775
def follow_link ( self , link , * * kwargs ) : try : href = link [ 'href' ] except KeyError : raise exceptions . RoboError ( 'Link element must have "href" ' 'attribute' ) self . open ( self . _build_url ( href ) , * * kwargs )
Click a link .
69
4
235,776
def submit_form ( self , form , submit = None , * * kwargs ) : # Get HTTP verb method = form . method . upper ( ) # Send request url = self . _build_url ( form . action ) or self . url payload = form . serialize ( submit = submit ) serialized = payload . to_requests ( method ) send_args = self . _build_send_args ( * * kwargs ) send_args . update ( serialized ) response = self . session . request ( method , url , * * send_args ) # Update history self . _update_state ( response )
Submit a form .
134
4
235,777
def find ( soup , name = None , attrs = None , recursive = True , text = None , * * kwargs ) : tags = find_all ( soup , name , attrs or { } , recursive , text , 1 , * * kwargs ) if tags : return tags [ 0 ]
Modified find method ; see find_all above .
65
11
235,778
def _set_initial ( self , initial ) : super ( Select , self ) . _set_initial ( initial ) if not self . _value and self . options : self . value = self . options [ 0 ]
If no option is selected initially select the first option .
46
11
235,779
def encode_if_py2 ( func ) : if not PY2 : return func def wrapped ( * args , * * kwargs ) : ret = func ( * args , * * kwargs ) if not isinstance ( ret , unicode ) : raise TypeError ( 'Wrapped function must return `unicode`' ) return ret . encode ( 'utf-8' , 'ignore' ) return wrapped
If Python 2 . x return decorated function encoding unicode return value to UTF - 8 ; else noop .
89
22
235,780
def _reduce_age ( self , now ) : if self . max_age : keys = [ key for key , value in iteritems ( self . data ) if now - value [ 'date' ] > self . max_age ] for key in keys : del self . data [ key ]
Reduce size of cache by date .
63
8
235,781
def _reduce_count ( self ) : if self . max_count : while len ( self . data ) > self . max_count : self . data . popitem ( last = False )
Reduce size of cache by count .
42
8
235,782
def store ( self , response ) : if response . status_code not in CACHE_CODES : return now = datetime . datetime . now ( ) self . data [ response . url ] = { 'date' : now , 'response' : response , } logger . info ( 'Stored response in cache' ) self . _reduce_age ( now ) self . _reduce_count ( )
Store response in cache skipping if code is forbidden .
90
10
235,783
def retrieve ( self , request ) : if request . method not in CACHE_VERBS : return try : response = self . data [ request . url ] [ 'response' ] logger . info ( 'Retrieved response from cache' ) return response except KeyError : return None
Look up request in cache skipping if verb is forbidden .
59
11
235,784
def _group_flat_tags ( tag , tags ) : grouped = [ tag ] name = tag . get ( 'name' , '' ) . lower ( ) while tags and tags [ 0 ] . get ( 'name' , '' ) . lower ( ) == name : grouped . append ( tags . pop ( 0 ) ) return grouped
Extract tags sharing the same name as the provided tag . Used to collect options for radio and checkbox inputs .
70
23
235,785
def _parse_fields ( parsed ) : # Note: Call this `out` to avoid name conflict with `fields` module out = [ ] # Prepare field tags tags = parsed . find_all ( _tag_ptn ) for tag in tags : helpers . lowercase_attr_names ( tag ) while tags : tag = tags . pop ( 0 ) try : field = _parse_field ( tag , tags ) except exceptions . InvalidNameError : continue if field is not None : out . append ( field ) return out
Parse form fields from HTML .
110
7
235,786
def add ( self , data , key = None ) : sink = self . options [ key ] if key is not None else self . data for key , value in iteritems ( data ) : sink . add ( key , value )
Add field values to container .
48
6
235,787
def to_requests ( self , method = 'get' ) : out = { } data_key = 'params' if method . lower ( ) == 'get' else 'data' out [ data_key ] = self . data out . update ( self . options ) return dict ( [ ( key , list ( value . items ( multi = True ) ) ) for key , value in iteritems ( out ) ] )
Export to Requests format .
89
6
235,788
def add_field ( self , field ) : if not isinstance ( field , fields . BaseField ) : raise ValueError ( 'Argument "field" must be an instance of ' 'BaseField' ) self . fields . add ( field . name , field )
Add a field .
56
4
235,789
def serialize ( self , submit = None ) : include_fields = prepare_fields ( self . fields , self . submit_fields , submit ) return Payload . from_fields ( include_fields )
Serialize each form field to a Payload container .
43
11
235,790
async def save ( self , db ) : kwargs = { } for col in self . _auto_columns : if not self . has_real_data ( col . name ) : kwargs [ col . name ] = await col . auto_generate ( db , self ) self . __dict__ . update ( kwargs ) # we have to delete the old index key stale_object = await self . __class__ . load ( db , identifier = self . identifier ( ) ) d = { k : ( v . strftime ( DATETIME_FORMAT ) if isinstance ( v , datetime ) else v ) for k , v in self . __dict__ . items ( ) } success = await db . hmset_dict ( self . redis_key ( ) , d ) await self . save_index ( db , stale_object = stale_object ) return success
Save the object to Redis .
194
7
235,791
def check_address ( address ) : if isinstance ( address , tuple ) : check_host ( address [ 0 ] ) check_port ( address [ 1 ] ) elif isinstance ( address , string_types ) : if os . name != 'posix' : raise ValueError ( 'Platform does not support UNIX domain sockets' ) if not ( os . path . exists ( address ) or os . access ( os . path . dirname ( address ) , os . W_OK ) ) : raise ValueError ( 'ADDRESS not a valid socket domain socket ({0})' . format ( address ) ) else : raise ValueError ( 'ADDRESS is not a tuple, string, or character buffer ' '({0})' . format ( type ( address ) . __name__ ) )
Check if the format of the address is correct
169
9
235,792
def check_addresses ( address_list , is_remote = False ) : assert all ( isinstance ( x , ( tuple , string_types ) ) for x in address_list ) if ( is_remote and any ( isinstance ( x , string_types ) for x in address_list ) ) : raise AssertionError ( 'UNIX domain sockets not allowed for remote' 'addresses' ) for address in address_list : check_address ( address )
Check if the format of the addresses is correct
100
9
235,793
def create_logger ( logger = None , loglevel = None , capture_warnings = True , add_paramiko_handler = True ) : logger = logger or logging . getLogger ( '{0}.SSHTunnelForwarder' . format ( __name__ ) ) if not any ( isinstance ( x , logging . Handler ) for x in logger . handlers ) : logger . setLevel ( loglevel or DEFAULT_LOGLEVEL ) console_handler = logging . StreamHandler ( ) _add_handler ( logger , handler = console_handler , loglevel = loglevel or DEFAULT_LOGLEVEL ) if loglevel : # override if loglevel was set logger . setLevel ( loglevel ) for handler in logger . handlers : handler . setLevel ( loglevel ) if add_paramiko_handler : _check_paramiko_handlers ( logger = logger ) if capture_warnings and sys . version_info >= ( 2 , 7 ) : logging . captureWarnings ( True ) pywarnings = logging . getLogger ( 'py.warnings' ) pywarnings . handlers . extend ( logger . handlers ) return logger
Attach or create a new logger and add a console handler if not present
254
14
235,794
def _add_handler ( logger , handler = None , loglevel = None ) : handler . setLevel ( loglevel or DEFAULT_LOGLEVEL ) if handler . level <= logging . DEBUG : _fmt = '%(asctime)s| %(levelname)-4.3s|%(threadName)10.9s/' '%(lineno)04d@%(module)-10.9s| %(message)s' handler . setFormatter ( logging . Formatter ( _fmt ) ) else : handler . setFormatter ( logging . Formatter ( '%(asctime)s| %(levelname)-8s| %(message)s' ) ) logger . addHandler ( handler )
Add a handler to an existing logging . Logger object
163
11
235,795
def _check_paramiko_handlers ( logger = None ) : paramiko_logger = logging . getLogger ( 'paramiko.transport' ) if not paramiko_logger . handlers : if logger : paramiko_logger . handlers = logger . handlers else : console_handler = logging . StreamHandler ( ) console_handler . setFormatter ( logging . Formatter ( '%(asctime)s | %(levelname)-8s| PARAMIKO: ' '%(lineno)03d@%(module)-10s| %(message)s' ) ) paramiko_logger . addHandler ( console_handler )
Add a console handler for paramiko . transport s logger if not present
144
14
235,796
def _remove_none_values ( dictionary ) : return list ( map ( dictionary . pop , [ i for i in dictionary if dictionary [ i ] is None ] ) )
Remove dictionary keys whose value is None
36
7
235,797
def _cli_main ( args = None ) : arguments = _parse_arguments ( args ) # Remove all "None" input values _remove_none_values ( arguments ) verbosity = min ( arguments . pop ( 'verbose' ) , 4 ) levels = [ logging . ERROR , logging . WARNING , logging . INFO , logging . DEBUG , TRACE_LEVEL ] arguments . setdefault ( 'debug_level' , levels [ verbosity ] ) with open_tunnel ( * * arguments ) as tunnel : if tunnel . is_alive : input_ ( ''' Press <Ctrl-C> or <Enter> to stop! ''' )
Pass input arguments to open_tunnel
139
8
235,798
def _make_ssh_forward_handler_class ( self , remote_address_ ) : class Handler ( _ForwardHandler ) : remote_address = remote_address_ ssh_transport = self . _transport logger = self . logger return Handler
Make SSH Handler class
53
4
235,799
def _make_ssh_forward_server ( self , remote_address , local_bind_address ) : _Handler = self . _make_ssh_forward_handler_class ( remote_address ) try : if isinstance ( local_bind_address , string_types ) : forward_maker_class = self . _make_unix_ssh_forward_server_class else : forward_maker_class = self . _make_ssh_forward_server_class _Server = forward_maker_class ( remote_address ) ssh_forward_server = _Server ( local_bind_address , _Handler , logger = self . logger , ) if ssh_forward_server : ssh_forward_server . daemon_threads = self . daemon_forward_servers self . _server_list . append ( ssh_forward_server ) self . tunnel_is_up [ ssh_forward_server . server_address ] = False else : self . _raise ( BaseSSHTunnelForwarderError , 'Problem setting up ssh {0} <> {1} forwarder. You can ' 'suppress this exception by using the `mute_exceptions`' 'argument' . format ( address_to_str ( local_bind_address ) , address_to_str ( remote_address ) ) ) except IOError : self . _raise ( BaseSSHTunnelForwarderError , "Couldn't open tunnel {0} <> {1} might be in use or " "destination not reachable" . format ( address_to_str ( local_bind_address ) , address_to_str ( remote_address ) ) )
Make SSH forward proxy Server class
355
6