idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
237,300
def get(self, blob_hash):
    """Get an object from the external store and unpack it.

    Checks the local cache folder first, then the configured store
    ('file' or 's3'), and back-fills the cache on a miss.

    :param blob_hash: hash naming the blob; characters past STORE_HASH_LENGTH
        select the external store. None simply returns None.
    :return: the unpacked object, or None if blob_hash is None
    :raises DataJointError: if the blob is lost, the store config is
        incomplete, or the protocol is unknown
    """
    if blob_hash is None:
        return None
    # The hash suffix names the store: '' -> 'external', 'x' -> 'external-x'.
    store = blob_hash[STORE_HASH_LENGTH:]
    store = 'external' + ('-' if store else '') + store
    cache_folder = config.get('cache', None)
    blob = None
    # Try the local cache first.
    if cache_folder:
        try:
            with open(os.path.join(cache_folder, blob_hash), 'rb') as f:
                blob = f.read()
        except FileNotFoundError:
            pass  # cache miss -- fall through to the external store
    if blob is None:
        spec = self._get_store_spec(store)
        if spec['protocol'] == 'file':
            full_path = os.path.join(spec['location'], self.database, blob_hash)
            try:
                with open(full_path, 'rb') as f:
                    blob = f.read()
            except FileNotFoundError:
                raise DataJointError('Lost access to external blob %s.' % full_path) from None
        elif spec['protocol'] == 's3':
            try:
                blob = S3Folder(database=self.database, **spec).get(blob_hash)
            except TypeError:
                # S3Folder raises TypeError when required config keys are missing.
                raise DataJointError('External store {store} configuration is incomplete.'.format(store=store))
        else:
            raise DataJointError('Unknown external storage protocol "%s"' % spec['protocol'])
        # Populate the cache for the next access.
        if cache_folder:
            if not os.path.exists(cache_folder):
                os.makedirs(cache_folder)
            safe_write(os.path.join(cache_folder, blob_hash), blob)
    return unpack(blob)
Get an object from the external store. Does not need to check whether it's in the table.
369
19
237,301
def delete_garbage(self):
    """Delete items that are no longer referenced.

    This operation is safe to perform at any time.

    Fix: the original relied on ``prefix + join(...) or "TRUE"``, but ``+``
    binds tighter than ``or``, so the ``"TRUE"`` fallback could never fire and
    an empty ``self.references`` produced malformed SQL
    (``... WHERE `` with no condition). The fallback is now parenthesized so
    it applies to the join result alone.
    """
    self.connection.query(
        "DELETE FROM `{db}`.`{tab}` WHERE ".format(tab=self.table_name, db=self.database) +
        (" AND ".join(
            'hash NOT IN (SELECT {column_name} FROM {referencing_table})'.format(**ref)
            for ref in self.references) or "TRUE"))
    # ROW_COUNT() reports how many rows the DELETE above removed.
    print('Deleted %d items' % self.connection.query("SELECT ROW_COUNT()").fetchone()[0])
Delete items that are no longer referenced . This operation is safe to perform at any time .
131
18
237,302
def clean_store(self, store, display_progress=True):
    """Clean unused blobs from an external storage repository.

    This must be performed after delete_garbage during low-usage periods to
    reduce risks of data loss.

    :param store: name of the external store to clean
    :param display_progress: show a tqdm progress bar when True
    :raises DataJointError: if an s3 store's configuration is incomplete
    """
    spec = self._get_store_spec(store)
    progress = tqdm if display_progress else lambda x: x
    if spec['protocol'] == 'file':
        folder = os.path.join(spec['location'], self.database)
        # Anything on disk not referenced by a hash in this table is unused.
        delete_list = set(os.listdir(folder)).difference(self.fetch('hash'))
        print('Deleting %d unused items from %s' % (len(delete_list), folder), flush=True)
        for f in progress(delete_list):
            os.remove(os.path.join(folder, f))
    elif spec['protocol'] == 's3':
        try:
            S3Folder(database=self.database, **spec).clean(self.fetch('hash'))
        except TypeError:
            # S3Folder raises TypeError when required config keys are missing.
            raise DataJointError('External store {store} configuration is incomplete.'.format(store=store))
Clean unused data in an external storage repository from unused blobs . This must be performed after delete_garbage during low - usage periods to reduce risks of data loss .
227
34
237,303
def is_connection_error(e):
    """Checks if error e pertains to a connection issue."""
    # A dropped connection surfaces either as an InterfaceError with the
    # characteristic "(0, '')" payload, or as an OperationalError whose
    # code is one of the known connection-related codes.
    if isinstance(e, err.InterfaceError) and e.args[0] == "(0, '')":
        return True
    return isinstance(e, err.OperationalError) and e.args[0] in operation_error_codes.values()
Checks if error e pertains to a connection issue
67
11
237,304
def decompress(data):
    """Decompress a complete Brotli-compressed string."""
    decoder = Decompressor()
    result = decoder.decompress(data)
    # finish() validates that the stream was complete; it emits no data.
    decoder.finish()
    return result
Decompress a complete Brotli - compressed string .
30
12
237,305
def compress(data, mode=DEFAULT_MODE, quality=lib.BROTLI_DEFAULT_QUALITY, lgwin=lib.BROTLI_DEFAULT_WINDOW, lgblock=0, dictionary=b''):
    """Compress a string using Brotli in a single shot.

    :param data: the bytes to compress
    :param mode: a Brotli mode constant
    :param quality: encoder quality setting
    :param lgwin: base-2 log of the sliding window size
    :param lgblock: base-2 log of the maximum input block size (0 lets Brotli decide)
    :param dictionary: optional pre-shared dictionary bytes
    :return: the complete compressed byte string
    """
    # This method uses private variables on the Compressor object, and
    # generally does a whole lot of stuff that's not supported by the public
    # API. The goal here is to minimise the number of allocations and copies
    # we have to do. Users should prefer this method over the Compressor if
    # they know they have single-shot data.
    compressor = Compressor(mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock, dictionary=dictionary)
    compressed_data = compressor._compress(data, lib.BROTLI_OPERATION_FINISH)
    # A FINISH operation on the whole input must leave the encoder done,
    # with no pending output.
    assert lib.BrotliEncoderIsFinished(compressor._encoder) == lib.BROTLI_TRUE
    assert (lib.BrotliEncoderHasMoreOutput(compressor._encoder) == lib.BROTLI_FALSE)
    return compressed_data
Compress a string using Brotli .
242
9
237,306
def _compress(self, data, operation):
    """This private method compresses some data in a given operation mode.

    This is used because almost all of the code uses the exact same setup.

    :param data: bytes to feed to the encoder
    :param operation: a lib.BROTLI_OPERATION_* constant
    :return: the bytes emitted by the encoder for this call
    :raises Error: if the underlying encoder reports a failure
    """
    # The 'algorithm' for working out how big to make this buffer is from
    # the Brotli source code, brotlimodule.cc.
    original_output_size = int(math.ceil(len(data) + (len(data) >> 2) + 10240))
    available_out = ffi.new("size_t *")
    available_out[0] = original_output_size
    output_buffer = ffi.new("uint8_t []", available_out[0])
    ptr_to_output_buffer = ffi.new("uint8_t **", output_buffer)
    input_size = ffi.new("size_t *", len(data))
    input_buffer = ffi.new("uint8_t []", data)
    ptr_to_input_buffer = ffi.new("uint8_t **", input_buffer)
    rc = lib.BrotliEncoderCompressStream(self._encoder, operation, input_size, ptr_to_input_buffer, available_out, ptr_to_output_buffer, ffi.NULL)
    if rc != lib.BROTLI_TRUE:  # pragma: no cover
        raise Error("Error encountered compressing data.")
    # The output buffer was sized to swallow all input in a single call.
    assert not input_size[0]
    size_of_output = original_output_size - available_out[0]
    # Copy the produced bytes out of the cffi buffer into a Python bytes object.
    return ffi.buffer(output_buffer, size_of_output)[:]
This private method compresses some data in a given mode. This is used because almost all of the code uses the exact same setup. It wouldn't have to, but it doesn't hurt at all.
342
40
237,307
def flush(self):
    """Flush the compressor.

    Emits all pending output without destroying the compressor, so that the
    chunks produced so far can be decompressed immediately.
    """
    # The first FLUSH may not drain everything; keep flushing until the
    # encoder reports no more pending output.
    pending = [self._compress(b'', lib.BROTLI_OPERATION_FLUSH)]
    while lib.BrotliEncoderHasMoreOutput(self._encoder) == lib.BROTLI_TRUE:
        pending.append(self._compress(b'', lib.BROTLI_OPERATION_FLUSH))
    return b''.join(pending)
Flush the compressor . This will emit the remaining output data but will not destroy the compressor . It can be used for example to ensure that given chunks of content will decompress immediately .
98
37
237,308
def finish(self):
    """Finish the compressor.

    Emits the remaining output and transitions the encoder to a completed
    state; the compressor cannot be used again afterwards.
    """
    pending = []
    # Drive FINISH operations until the encoder declares itself done.
    while lib.BrotliEncoderIsFinished(self._encoder) == lib.BROTLI_FALSE:
        pending.append(self._compress(b'', lib.BROTLI_OPERATION_FINISH))
    return b''.join(pending)
Finish the compressor . This will emit the remaining output data and transition the compressor to a completed state . The compressor cannot be used again after this point and must be replaced .
72
34
237,309
def decompress(self, data):
    """Decompress part of a complete Brotli-compressed string.

    :param data: compressed bytes (may be only a fragment of the stream)
    :return: all decompressed output available so far
    :raises Error: if the decoder reports corrupt input
    """
    chunks = []
    available_in = ffi.new("size_t *", len(data))
    in_buffer = ffi.new("uint8_t[]", data)
    next_in = ffi.new("uint8_t **", in_buffer)
    while True:
        # Allocate a buffer that's hopefully overlarge, but if it's not we
        # don't mind: we'll spin around again.
        buffer_size = 5 * len(data)
        available_out = ffi.new("size_t *", buffer_size)
        out_buffer = ffi.new("uint8_t[]", buffer_size)
        next_out = ffi.new("uint8_t **", out_buffer)
        rc = lib.BrotliDecoderDecompressStream(self._decoder, available_in, next_in, available_out, next_out, ffi.NULL)
        # First, check for errors.
        if rc == lib.BROTLI_DECODER_RESULT_ERROR:
            error_code = lib.BrotliDecoderGetErrorCode(self._decoder)
            error_message = lib.BrotliDecoderErrorString(error_code)
            raise Error("Decompression error: %s" % ffi.string(error_message))
        # Next, copy the result out.
        chunk = ffi.buffer(out_buffer, buffer_size - available_out[0])[:]
        chunks.append(chunk)
        if rc == lib.BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT:
            # The decoder consumed all input; more data must come from the caller.
            assert available_in[0] == 0
            break
        elif rc == lib.BROTLI_DECODER_RESULT_SUCCESS:
            break
        else:
            # It's cool if we need more output, we just loop again.
            assert rc == lib.BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT
    return b''.join(chunks)
Decompress part of a complete Brotli - compressed string .
451
14
237,310
def finish(self):
    """Finish the decompressor.

    The decompressor decompresses eagerly, so this never emits data; it only
    raises if the stream was truncated or damaged.
    """
    # Eager decompression means no output can be pending here.
    assert lib.BrotliDecoderHasMoreOutput(self._decoder) == lib.BROTLI_FALSE
    if lib.BrotliDecoderIsFinished(self._decoder) == lib.BROTLI_FALSE:
        raise Error("Decompression error: incomplete compressed stream.")
    return b''
Finish the decompressor . As the decompressor decompresses eagerly this will never actually emit any data . However it will potentially throw errors if a truncated or damaged data stream has been used .
80
38
237,311
def hsv_to_rgb(hsv):
    """Converts a tuple of (hue, saturation, value) to (red, green, blue).

    Hue should be an angle from 0.0 to 359.0. Saturation and value should be
    in [0.0, 1.0], where saturation controls the intensity of the hue and
    value controls the brightness.
    """
    # Algorithm adapted from http://www.cs.rit.edu/~ncs/color/t_convert.html
    h, s, v = hsv
    if s == 0:
        # Achromatic: all channels equal the brightness.
        return (v, v, v)
    h /= 60.0
    sector = math.floor(h)
    frac = h - sector
    p = v * (1.0 - s)
    q = v * (1.0 - s * frac)
    t = v * (1.0 - s * (1.0 - frac))
    # One entry per 60-degree sector of the color wheel; sector 5 (and any
    # overflow) falls through to the default, matching the original else.
    by_sector = {
        0: (v, t, p),
        1: (q, v, p),
        2: (p, v, t),
        3: (p, q, v),
        4: (t, p, v),
    }
    return by_sector.get(sector, (v, p, q))
Converts a tuple of hue saturation value to a tuple of red green blue . Hue should be an angle from 0 . 0 to 359 . 0 . Saturation and value should be a value from 0 . 0 to 1 . 0 where saturation controls the intensity of the hue and value controls the brightness .
202
60
237,312
def set_cursor(self, col, row):
    """Move the cursor to an explicit column and row position.

    :param col: zero-based column
    :param row: zero-based row; out-of-range values are clamped to the last row

    Fix: rows are zero-indexed, so ``row == self._lines`` is already out of
    range; the original ``>`` comparison let it through and indexed
    LCD_ROW_OFFSETS past the valid rows. Clamp with ``>=`` instead.
    """
    # Clamp row to the last row of the display.
    if row >= self._lines:
        row = self._lines - 1
    # Set location.
    self.write8(LCD_SETDDRAMADDR | (col + LCD_ROW_OFFSETS[row]))
Move the cursor to an explicit column and row position .
75
11
237,313
def enable_display(self, enable):
    """Enable or disable the display. Set enable to True to enable."""
    # Toggle the display-on bit, then push the updated control byte.
    if enable:
        updated = self.displaycontrol | LCD_DISPLAYON
    else:
        updated = self.displaycontrol & ~LCD_DISPLAYON
    self.displaycontrol = updated
    self.write8(LCD_DISPLAYCONTROL | updated)
Enable or disable the display . Set enable to True to enable .
56
13
237,314
def show_cursor(self, show):
    """Show or hide the cursor. Cursor is shown if show is True."""
    # Toggle the cursor-on bit, then push the updated control byte.
    updated = (self.displaycontrol | LCD_CURSORON) if show else (self.displaycontrol & ~LCD_CURSORON)
    self.displaycontrol = updated
    self.write8(LCD_DISPLAYCONTROL | updated)
Show or hide the cursor . Cursor is shown if show is True .
61
15
237,315
def blink(self, blink):
    """Turn on or off cursor blinking. Set blink to True to enable blinking."""
    # Toggle the blink bit, then push the updated control byte.
    updated = (self.displaycontrol | LCD_BLINKON) if blink else (self.displaycontrol & ~LCD_BLINKON)
    self.displaycontrol = updated
    self.write8(LCD_DISPLAYCONTROL | updated)
Turn on or off cursor blinking . Set blink to True to enable blinking .
54
15
237,316
def set_left_to_right(self):
    """Set text direction left to right."""
    # Turn on the entry-left flag and apply the new entry mode.
    mode = self.displaymode | LCD_ENTRYLEFT
    self.displaymode = mode
    self.write8(LCD_ENTRYMODESET | mode)
Set text direction left to right .
42
7
237,317
def set_right_to_left(self):
    """Set text direction right to left."""
    # Clear the entry-left flag and apply the new entry mode.
    mode = self.displaymode & ~LCD_ENTRYLEFT
    self.displaymode = mode
    self.write8(LCD_ENTRYMODESET | mode)
Set text direction right to left .
43
7
237,318
def autoscroll(self, autoscroll):
    """Autoscroll will right justify text from the cursor if set True,
    otherwise it will left justify the text."""
    # Toggle the shift-increment bit, then push the updated entry mode.
    updated = (self.displaymode | LCD_ENTRYSHIFTINCREMENT) if autoscroll else (self.displaymode & ~LCD_ENTRYSHIFTINCREMENT)
    self.displaymode = updated
    self.write8(LCD_ENTRYMODESET | updated)
Autoscroll will right justify text from the cursor if set True otherwise it will left justify the text .
68
21
237,319
def message(self, text):
    """Write text to display. Note that text can include newlines."""
    line = 0
    # Iterate through each character.
    for char in text:
        if char == '\n':
            # Newline: advance one row and return to the edge matching
            # the current text direction.
            line += 1
            if self.displaymode & LCD_ENTRYLEFT > 0:
                col = 0
            else:
                col = self._cols - 1
            self.set_cursor(col, line)
        else:
            # Write the character to the display.
            self.write8(ord(char), True)
Write text to display . Note that text can include newlines .
116
13
237,320
def findExp(self, data):
    """Method to look for the current regular expressions in the provided string.

    :param data: text to scan with every pattern in self.reg_exp
    :return: results built from the validated, deduplicated matches
    """
    # NOTE(review): Python 2 print statements; the broad except hides
    # invalid patterns as well as genuine re errors.
    temp = []
    for r in self.reg_exp:
        try:
            temp += re.findall(r, data)
        except:
            print self.name
            print r
            print "CABOOOOM!"
    verifiedExp = []
    # verification
    for t in temp:
        # Remember: the regexps include two extra characters (before and after) that should be removed now.
        if self.isValidExp(t):
            if t not in verifiedExp:
                verifiedExp.append(t)
    return self.getResults(verifiedExp)
Method to look for the current regular expression in the provided string .
118
13
237,321
def exportUsufy(data, ext, fileH):
    """Method that exports the different structures onto different formats.

    :param data: structures to export
    :param ext: target extension (csv, gml, json, ods, png, txt, xls, xlsx)
    :param fileH: output path without extension
    """
    # Dispatch table replaces the original if/elif chain; unknown
    # extensions are silently ignored, as before.
    exporters = {
        "csv": usufyToCsvExport,
        "gml": usufyToGmlExport,
        "json": usufyToJsonExport,
        "ods": usufyToOdsExport,
        "png": usufyToPngExport,
        "txt": usufyToTextExport,
        "xls": usufyToXlsExport,
        "xlsx": usufyToXlsxExport,
    }
    exporter = exporters.get(ext)
    if exporter is not None:
        exporter(data, fileH + "." + ext)
Method that exports the different structures onto different formats .
225
10
237,322
def usufyToJsonExport(d, fPath):
    """Workaround to export to a json file.

    Appends the new entities to any valid JSON already stored at fPath and
    rewrites the file.

    :param d: list of entities to append
    :param fPath: path of the output JSON file

    Fix: the bare ``except`` swallowed every error (including typos in this
    function); it is narrowed to missing-file and invalid-JSON cases, which
    are the two situations the original comment describes.
    """
    oldData = []
    try:
        with open(fPath) as iF:
            oldText = iF.read()
        if oldText != "":
            oldData = json.loads(oldText)
    except (IOError, OSError):
        # No file found, so we will create it...
        pass
    except ValueError:
        # Existing file is not valid JSON; start fresh rather than crash.
        pass
    jsonText = json.dumps(oldData + d, indent=2, sort_keys=True)
    with open(fPath, "w") as oF:
        oF.write(jsonText)
Workaround to export to a json file .
115
9
237,323
def usufyToTextExport(d, fPath=None):
    """Workaround to export to a .txt file or to show the information.

    :param d: list of entities to render
    :param fPath: output path; when None (or unwritable) the rendered grid
        table is returned instead of written
    :return: a placeholder table when d is empty, or the rendered sheet when
        no file could be written
    """
    # Manual check...
    if d == []:
        return "+------------------+\n| No data found... |\n+------------------+"
    import pyexcel as pe
    import pyexcel.ext.text as text
    if fPath == None:
        isTerminal = True
    else:
        isTerminal = False
    try:
        # NOTE(review): get_data is assumed to be imported at module level -- verify.
        oldData = get_data(fPath)
    except:
        # No information has been recovered
        oldData = {"OSRFramework": []}
    # Generating the new tabular data
    tabularData = _generateTabularData(d, {"OSRFramework": [[]]}, True, canUnicode=False)
    # The tabular data contains a dict representing the whole book and we need only the sheet!!
    sheet = pe.Sheet(tabularData["OSRFramework"])
    sheet.name = "Profiles recovered (" + getCurrentStrDatetime() + ")."
    # Defining the headers
    sheet.name_columns_by_row(0)
    text.TABLEFMT = "grid"
    try:
        with open(fPath, "w") as oF:
            oF.write(str(sheet))
    except Exception as e:
        # If a fPath was not provided... We will only print the info:
        # NOTE(review): `unicode` is Python 2 only.
        return unicode(sheet)
Workaround to export to a . txt file or to show the information .
293
16
237,324
def usufyToCsvExport(d, fPath):
    """Workaround to export to a CSV file."""
    from pyexcel_io import get_data, save_data
    # Recover any previously stored rows; fall back to an empty book.
    try:
        oldData = {"OSRFramework": get_data(fPath)}
    except:
        # No information has been recovered
        oldData = {"OSRFramework": []}
    # Generating the new tabular data.
    tabularData = _generateTabularData(d, oldData)
    # Storing the file
    # NOTE: when working with CSV files it is no longer a dict because it is a one-sheet-format
    save_data(fPath, tabularData["OSRFramework"])
Workaround to export to a CSV file .
150
9
237,325
def usufyToOdsExport(d, fPath):
    """Workaround to export to a .ods file."""
    from pyexcel_ods import get_data, save_data
    # A change in the API now returns only an array of arrays if there is
    # only one sheet, so wrap the recovered data in the sheet name ourselves.
    try:
        oldData = {"OSRFramework": get_data(fPath)}
    except:
        # No information has been recovered
        oldData = {"OSRFramework": []}
    # Generating the new tabular data
    tabularData = _generateTabularData(d, oldData)
    # Storing the file
    save_data(fPath, tabularData)
Workaround to export to a . ods file .
149
11
237,326
def usufyToXlsExport(d, fPath):
    """Workaround to export to a .xls file."""
    from pyexcel_xls import get_data, save_data
    # A change in the API now returns only an array of arrays if there is
    # only one sheet, so wrap the recovered data in the sheet name ourselves.
    try:
        oldData = {"OSRFramework": get_data(fPath)}
    except:
        # No information has been recovered
        oldData = {"OSRFramework": []}
    # Generating the new tabular data
    tabularData = _generateTabularData(d, oldData)
    # Storing the file
    save_data(fPath, tabularData)
Workaround to export to a . xls file .
151
11
237,327
def usufyToXlsxExport(d, fPath):
    """Workaround to export to a .xlsx file."""
    from pyexcel_xlsx import get_data, save_data
    # A change in the API now returns only an array of arrays if there is
    # only one sheet, so wrap the recovered data in the sheet name ourselves.
    try:
        oldData = {"OSRFramework": get_data(fPath)}
    except:
        # No information has been recovered
        oldData = {"OSRFramework": []}
    # Generating the new tabular data
    tabularData = _generateTabularData(d, oldData)
    # Storing the file
    save_data(fPath, tabularData)
Workaround to export to a . xlsx file .
154
12
237,328
def usufyToGmlExport(d, fPath):
    """Workaround to export data to a .gml file.

    :param d: entities to graph
    :param fPath: output path; renamed with a timestamp if the existing file
        cannot be decoded

    Fix: the original message appended a *second* ``time.time()`` to the
    already-renamed path, so it reported a filename that was never used;
    it now prints the actual output path.
    """
    # Reading the previous gml file
    try:
        oldData = nx.read_gml(fPath)
    except UnicodeDecodeError as e:
        print("UnicodeDecodeError:\t" + str(e))
        print("Something went wrong when reading the .gml file relating to the decoding of UNICODE.")
        import time
        fPath += "_" + str(time.time())
        print("To avoid losing data, the output file will be renamed to use the timestamp as:\n" + fPath)
        print()
        # No information has been recovered
        oldData = nx.Graph()
    except Exception as e:
        # No information has been recovered
        oldData = nx.Graph()
    newGraph = _generateGraphData(d, oldData)
    # Writing the gml file
    nx.write_gml(newGraph, fPath)
Workaround to export data to a . gml file .
215
12
237,329
def usufyToPngExport(d, fPath):
    """Workaround to export to a png file."""
    import matplotlib.pyplot as plt
    graph = _generateGraphData(d)
    # Writing the png file
    nx.draw(graph)
    plt.savefig(fPath)
Workaround to export to a png file .
59
10
237,330
def fileToMD5(filename, block_size=256*128, binary=False):
    """A function that calculates the MD5 hash of a file.

    :param filename: path of the file to hash
    :param block_size: number of bytes read per chunk
    :param binary: return the raw digest bytes instead of hex when True
    :return: hex digest string, or raw digest bytes if binary is True
    """
    digest = hashlib.md5()
    # Stream the file in chunks so large files never sit fully in memory.
    with open(filename, 'rb') as source:
        while True:
            chunk = source.read(block_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.digest() if binary else digest.hexdigest()
A function that calculates the MD5 hash of a file .
90
12
237,331
def getCurrentStrDatetime():
    """Generating the current datetime with the format YYYY-M-D_HhMm."""
    now = datetime.datetime.now()
    return "%s-%s-%s_%sh%sm" % (now.year, now.month, now.day, now.hour, now.minute)
Generating the current Datetime with a given format
67
10
237,332
def getFilesFromAFolder(path):
    """Getting all the files in a folder (non-recursive, files only)."""
    from os import listdir
    from os.path import isfile, join
    return [entry for entry in listdir(path) if isfile(join(path, entry))]
Getting all the files in a folder .
85
8
237,333
def urisToBrowser(uris=None, autoraise=True):
    """Method that launches the URIs in the default browser of the system.

    Temporarily silences fds 1 and 2 so the browser's console noise does not
    pollute the program output; they are restored afterwards.

    :param uris: list of URIs to open (defaults to no URIs)
    :param autoraise: whether the browser window should be raised

    Fix: replaced the mutable default argument ``uris=[]`` with None, and
    corrected the stale comment (the code rewrites .onion to .onion.city,
    not onion.cab).
    """
    if uris is None:
        uris = []
    # Cloning stdout (1) and stderr (2)
    savout1 = os.dup(1)
    savout2 = os.dup(2)
    # Closing them
    os.close(1)
    os.close(2)
    os.open(os.devnull, os.O_RDWR)
    try:
        for uri in uris:
            # Opening Tor URIs through the onion.city web proxy
            if ".onion" in uri:
                wb.open(uri.replace(".onion", ".onion.city"), new=2, autoraise=autoraise)
            else:
                wb.open(uri, new=2, autoraise=autoraise)
    finally:
        # Reopening them...
        os.dup2(savout1, 1)
        os.dup2(savout2, 2)
Method that launches the URI in the default browser of the system
200
12
237,334
def openResultsInBrowser(res):
    """Method that collects the URIs from a list of entities and opens them."""
    print(emphasis("\n\tOpening URIs in the default web browser..."))
    urisToBrowser(["https://github.com/i3visio/osrframework"], autoraise=False)
    # Waiting 2 seconds to confirm that the browser is opened and prevent the OS from opening several windows
    time.sleep(2)
    # Collect every i3visio.uri attribute value across all results.
    uris = [att["value"]
            for r in res
            for att in r["attributes"]
            if att["type"] == "i3visio.uri"]
    urisToBrowser(uris)
Method that collects the URI from a list of entities and opens them
141
13
237,335
def colorize(text, messageType=None):
    """Function that colorizes a message.

    :param text: message to format
    :param messageType: string combining a level substring
        (ERROR/WARNING/SUCCESS/INFO) and optionally "BOLD"; None applies no
        styling
    :return: the text wrapped in the matching colorama codes, always ending
        with a style reset

    Fix: the declared default ``messageType=None`` crashed immediately on
    ``"ERROR" in None`` (TypeError); it is now coerced to an empty string.
    """
    formattedText = str(text)
    messageType = messageType or ""
    # Set colors
    if "ERROR" in messageType:
        formattedText = colorama.Fore.RED + formattedText
    elif "WARNING" in messageType:
        formattedText = colorama.Fore.YELLOW + formattedText
    elif "SUCCESS" in messageType:
        formattedText = colorama.Fore.GREEN + formattedText
    elif "INFO" in messageType:
        formattedText = colorama.Fore.BLUE + formattedText
    # Set emphasis mode
    if "BOLD" in messageType:
        formattedText = colorama.Style.BRIGHT + formattedText
    return formattedText + colorama.Style.RESET_ALL
Function that colorizes a message .
151
7
237,336
def showLicense():
    """Method that prints the license if requested.

    Downloads the license text from LICENSE_URL and prints it after user
    confirmation; failures are reported but never raised.
    """
    print("Trying to recover the contents of the license...\n")
    try:
        # Grab the license online and print it.
        # NOTE(review): urllib.urlopen and raw_input are Python 2 APIs.
        text = urllib.urlopen(LICENSE_URL).read()
        print("License retrieved from " + emphasis(LICENSE_URL) + ".")
        raw_input("\n\tPress " + emphasis("<ENTER>") + " to print it.\n")
        print(text)
    except:
        print(warning("The license could not be downloaded and printed."))
Method that prints the license if requested .
121
8
237,337
def expandEntitiesFromEmail(e):
    """Method that receives an email and creates linked entities.

    :param e: email address string
    :return: list of three entity dicts: the email itself, its alias
        (local part) and its domain
    """
    parts = e.split("@")
    email = {"type": "i3visio.email", "value": e, "attributes": []}
    alias = {"type": "i3visio.alias", "value": parts[0], "attributes": []}
    domain = {"type": "i3visio.domain", "value": parts[1], "attributes": []}
    return [email, alias, domain]
Method that receives an email and creates linked entities
162
9
237,338
def getNumberTLD():
    """Counting the total number of TLD being processed."""
    # Sum the sizes of every TLD category in a single pass.
    return sum(len(tlds) for tlds in TLD.values())
Counting the total number of TLD being processed .
37
11
237,339
def getWhoisInfo ( domain ) : new = [ ] # Grabbing the aliases try : emails = { } emails [ "type" ] = "i3visio.alias" emails [ "value" ] = str ( domain . split ( "." ) [ 0 ] ) emails [ "attributes" ] = [ ] new . append ( emails ) except : pass info = whois . whois ( domain ) if info . status == None : raise Exception ( "UnknownDomainError: " + domain + " could not be resolved." ) # Grabbing the emails try : emails = { } emails [ "type" ] = "i3visio.email" if type ( info . emails ) is not list : aux = [ info . emails ] emails [ "value" ] = json . dumps ( aux ) else : emails [ "value" ] = json . dumps ( info . emails ) emails [ "attributes" ] = [ ] new . append ( emails ) except : pass # Grabbing the country try : tmp = { } tmp [ "type" ] = "i3visio.location.country" tmp [ "value" ] = str ( info . country ) tmp [ "attributes" ] = [ ] new . append ( tmp ) except : pass # Grabbing the regitrar try : tmp = { } tmp [ "type" ] = "i3visio.registrar" tmp [ "value" ] = str ( info . registrar ) tmp [ "attributes" ] = [ ] new . append ( tmp ) except : pass # Grabbing the regitrar try : tmp = { } tmp [ "type" ] = "i3visio.fullname" try : tmp [ "value" ] = str ( info . name ) except : tmp [ "value" ] = info . name tmp [ "attributes" ] = [ ] new . append ( tmp ) except : pass return new
Method that tries to recover the whois info from a domain.
407
14
237,340
def createDomains(tlds, nicks=None, nicksFile=None):
    """Method that globally permits to generate the domains to be checked.

    :param tlds: list of dicts with "tld" and "type" keys
    :param nicks: list of nicknames to combine with each TLD
    :param nicksFile: path to a file with one nickname per line; used only
        when nicks is not provided
    :return: list of candidate dicts with "domain", "type" and "tld" keys

    Fix: the nick/TLD combination loop was duplicated verbatim for both
    sources; the file branch now just loads the nicks and both paths share
    one loop. Also replaced ``!= None`` with identity checks.
    """
    if nicks is None and nicksFile is not None:
        # Load the nicknames from disk, one per line.
        with open(nicksFile, "r") as iF:
            nicks = iF.read().splitlines()
    domain_candidates = []
    if nicks is not None:
        for n in nicks:
            for t in tlds:
                domain_candidates.append({
                    "domain": n + t["tld"],
                    "type": t["type"],
                    "tld": t["tld"],
                })
    return domain_candidates
Method that globally permits to generate the domains to be checked .
202
12
237,341
def weCanCheckTheseDomains(email):
    """Method that verifies if a domain can be safely verified.

    :param email: the email address whose provider is being checked
    :return: True when the email is neither blacklisted nor outside the
        supported EMAIL_DOMAINS; False otherwise (a warning is printed)

    Fix: a missing comma fused ``"@ukr.net" "@yahoo."`` into the single
    string ``"@ukr.net@yahoo."``, so neither domain was ever blacklisted.
    Also removed the unused local ``emailDomains``.
    """
    # Known platforms not to be working...
    notWorking = [
        "@aol.com", "@bk.ru", "@breakthru.com", "@gmx.", "@hotmail.co",
        "@inbox.com", "@latinmail.com", "@libero.it", "@mail.ru",
        "@mail2tor.com", "@outlook.com", "@rambler.ru", "@rocketmail.com",
        "@starmedia.com", "@ukr.net", "@yahoo.", "@ymail.",
    ]
    for n in notWorking:
        if n in email:
            print("\t[*] Verification of '{}' aborted. Details:\n\t\t{}".format(general.warning(email), "This domain CANNOT be verified using mailfy."))
            return False
    # The address must also belong to one of the supported providers.
    safe = any(e in email for e in EMAIL_DOMAINS)
    if not safe:
        print("\t[*] Verification of '{}' aborted. Details:\n\t\t{}".format(general.warning(email), "This domain CANNOT be verified using mailfy."))
        return False
    return True
Method that verifies if a domain can be safely verified .
291
12
237,342
def grabEmails(emails=None, emailsFile=None, nicks=None, nicksFile=None, domains=EMAIL_DOMAINS, excludeDomains=None):
    """Method that generates a list of candidate emails.

    Exactly one source is used, in priority order: emails, emailsFile,
    nicks, nicksFile.

    :param emails: explicit list of emails (returned as-is)
    :param emailsFile: path to a file with one email per line
    :param nicks: list of nicknames to combine with each domain
    :param nicksFile: path to a file with one nickname per line
    :param domains: domains to combine nicks with
    :param excludeDomains: domains to skip when combining (default none)
    :return: the list of candidate emails

    Fix: the nick/domain combination loop was duplicated verbatim for the
    nicks and nicksFile branches; the file branch now just loads the nicks.
    The mutable default ``excludeDomains=[]`` was replaced with None.
    """
    excluded = excludeDomains or []
    if emails is not None:
        return emails
    if emailsFile is not None:
        # Reading the emails file
        with open(emailsFile, "r") as iF:
            return iF.read().splitlines()
    if nicks is None and nicksFile is not None:
        # Reading the list of nicks
        with open(nicksFile, "r") as iF:
            nicks = iF.read().splitlines()
    email_candidates = []
    if nicks is not None:
        # Combine each nick with every non-excluded domain.
        for n in nicks:
            for d in domains:
                if d not in excluded:
                    email_candidates.append(n + "@" + d)
    return email_candidates
Method that generates a list of emails .
265
8
237,343
def processMailList(platformNames=[], emails=[]):
    """Method to perform the email search."""
    # Grabbing the <Platform> objects
    platforms = platform_selection.getPlatformsByName(platformNames, mode="mailfy")
    results = []
    for address in emails:
        for pla in platforms:
            # This returns a json.txt!
            entities = pla.getInfo(query=address, mode="mailfy")
            if entities != {}:
                results += json.loads(entities)
    return results
Method to perform the email search .
105
7
237,344
def pool_function(args):
    """A wrapper for being able to launch all the threads.

    Verifies one email address over SMTP and packs the outcome as an
    i3visio profile entity.

    :param args: the email address to verify
    :return: dict with "platform", "status" and "data" keys; "data" is an
        empty dict when the address could not be verified
    """
    is_valid = True
    try:
        checker = emailahoy.VerifyEmail()
        # SMTP status 250 means the recipient was accepted.
        status, message = checker.verify_email_smtp(args, from_host='gmail.com', from_email='sample@gmail.com')
        if status == 250:
            print("\t[*] Verification of '{}' status: {}. Details:\n\t\t{}".format(general.success(args), general.success("SUCCESS ({})".format(str(status))), message.replace('\n', '\n\t\t')))
            is_valid = True
        else:
            print("\t[*] Verification of '{}' status: {}. Details:\n\t\t{}".format(general.error(args), general.error("FAILED ({})".format(str(status))), message.replace('\n', '\n\t\t')))
            is_valid = False
    # NOTE(review): Python 2 except syntax.
    except Exception, e:
        print(general.warning("WARNING. An error was found when performing the search. You can omit this message.\n" + str(e)))
        is_valid = False
    aux = {}
    aux["type"] = "i3visio.profile"
    aux["value"] = "Email - " + args
    aux["attributes"] = general.expandEntitiesFromEmail(args)
    # The domain entity (index 2) doubles as the platform name.
    platform = aux["attributes"][2]["value"].title()
    aux["attributes"].append({"type": "i3visio.platform", "value": platform, "attributes": []})
    if is_valid:
        return {"platform": platform, "status": "DONE", "data": aux}
    else:
        return {"platform": platform, "status": "DONE", "data": {}}
A wrapper for being able to launch all the threads .
445
11
237,345
def recoverURL(self, url):
    """Public method to recover a resource.

    :param url: the URI to fetch
    :return: the response body, or None if the request failed
    """
    # Configuring user agents...
    self.setUserAgent()
    # Configuring proxies
    protocol = "https" if "https://" in url else "http"
    self.setProxy(protocol=protocol)
    # Giving special treatment for .onion platforms
    if ".onion" in url:
        try:
            # TODO: configuring manually the tor bundle
            pass
        except:
            # TODO: capturing the error and eventually trying the tor2web approach
            #url = url.replace(".onion", ".tor2web.org")
            pass
        url = url.replace(".onion", ".onion.cab")
    # Opening the resource
    try:
        recurso = self.br.open(url)
    except:
        # Something happened. Maybe the request was forbidden?
        return None
    return recurso.read()
Public method to recover a resource .
186
7
237,346
def setNewPassword(self, url, username, password):
    """Public method to manually set the credentials for a url in the browser.

    :param url: URL the credentials apply to
    :param username: account name
    :param password: account password
    """
    self.br.add_password(url, username, password)
Public method to manually set the credentials for a url in the browser .
28
14
237,347
def setProxy(self, protocol="http"):
    """Public method to set a proxy for the browser.

    :param protocol: protocol whose proxy entry (from self.proxies) to apply
    """
    # Setting proxy
    try:
        self.br.set_proxies({protocol: self.proxies[protocol]})
    except:
        # No proxy defined for that protocol
        pass
Public method to set a proxy for the browser .
52
10
237,348
def setUserAgent(self, uA=None):
    """This method will be called whenever a new query will be executed.

    Selects (or receives) a User-Agent string and installs it on the browser.

    :param uA: explicit user agent; when falsy, a random one is chosen from
        self.userAgents
    :return: True when a user agent was set; False when none was available

    Fix: the original wrote ``logger = logging.debug(...)``, which rebinds
    ``logger`` to the None returned by logging.debug and logs through the
    root logger; the messages now go through the module logger.
    """
    logger = logging.getLogger("osrframework.utils")
    if not uA:
        # Setting the User Agents
        if self.userAgents:
            # User-Agent (this is cheating, ok?)
            logger.debug("Selecting a new random User Agent.")
            uA = random.choice(self.userAgents)
        else:
            logger.debug("No user agent was inserted.")
            return False
    self.br.addheaders = [('User-agent', uA)]
    return True
This method will be called whenever a new query will be executed .
344
13
237,349
def main(args):
    """Query manager.

    Dispatches args.type to the matching TwitterAPIWrapper call and, for
    follower/friend queries, also dumps the recovered ids to a CSV file.

    :param args: argparse-style namespace with `type` and `query` attributes
    :return: the results of the selected query
    """
    # Creating the instance
    tAW = TwitterAPIWrapper()
    # Selecting the query to be launched
    # NOTE(review): Python 2 print statements below; an unrecognised
    # args.type would leave `results` unbound at the final return.
    if args.type == "get_all_docs":
        results = tAW.get_all_docs(args.query)
    elif args.type == "get_user":
        results = tAW.get_user(args.query)
    elif args.type == "get_followers":
        results = tAW.get_followers(args.query)
        print "... %s followers downloaded... " % (len(results))
        #write the csv
        with open('%s_followers.csv' % args.query, 'wb') as f:
            writer = csv.writer(f)
            for r in results:
                writer.writerow([args.query, str(r)])
    elif args.type == "get_friends":
        results = tAW.get_friends(args.query)
        print "... %s friends downloaded... " % (len(results))
        #write the csv
        with open('%s_friends.csv' % args.query, 'wb') as f:
            writer = csv.writer(f)
            for r in results:
                writer.writerow([args.query, str(r)])
    elif args.type == "search_users":
        results = tAW.search_users(args.query)
    return results
Query manager .
306
3
237,350
def _rate_limit_status ( self , api = None , mode = None ) : if api == None : api = self . connectToAPI ( ) if mode == None : print json . dumps ( api . rate_limit_status ( ) , indent = 2 ) raw_input ( "<Press ENTER>" ) else : # Testing if we have enough queries while True : allLimits = api . rate_limit_status ( ) if mode == "get_user" : limit = allLimits [ "resources" ] [ "users" ] [ "/users/show/:id" ] [ "limit" ] remaining = allLimits [ "resources" ] [ "users" ] [ "/users/show/:id" ] [ "remaining" ] reset = allLimits [ "resources" ] [ "users" ] [ "/users/show/:id" ] [ "reset" ] elif mode == "get_followers" : limit = allLimits [ "resources" ] [ "followers" ] [ "/followers/ids" ] [ "limit" ] remaining = allLimits [ "resources" ] [ "followers" ] [ "/followers/ids" ] [ "remaining" ] reset = allLimits [ "resources" ] [ "followers" ] [ "/followers/ids" ] [ "reset" ] elif mode == "get_friends" : limit = allLimits [ "resources" ] [ "friends" ] [ "/friends/ids" ] [ "limit" ] remaining = allLimits [ "resources" ] [ "friends" ] [ "/friends/ids" ] [ "remaining" ] reset = allLimits [ "resources" ] [ "friends" ] [ "/friends/ids" ] [ "reset" ] elif mode == "search_users" : limit = allLimits [ "resources" ] [ "users" ] [ "/users/search" ] [ "limit" ] remaining = allLimits [ "resources" ] [ "users" ] [ "/users/search" ] [ "remaining" ] reset = allLimits [ "resources" ] [ "users" ] [ "/users/search" ] [ "reset" ] else : remaining = 1 """elif mode == "get_all_docs": limit = allLimits["resources"]REPLACEME["limit"] remaining = allLimits["resources"]REPLACEME["remaining"] reset = allLimits["resources"]REPLACEME["reset"]""" """elif mode == "get_users": limit = allLimits["resources"]REPLACEME["limit"] remaining = allLimits["resources"]REPLACEME["remaining"] reset = allLimits["resources"]REPLACEME["reset"] """ """else: remaining = 1""" # Checking if we have enough remaining queries if remaining > 0 : #raw_input(str(remaining) + " queries yet...") break else : waitTime = 60 print "No 
more queries remaining, sleeping for " + str ( waitTime ) + " seconds..." time . sleep ( waitTime ) return 0
Verifying the API limits
673
5
237,351
def get_followers ( self , query ) : # Connecting to the API api = self . _connectToAPI ( ) # Verifying the limits of the API self . _rate_limit_status ( api = api , mode = "get_followers" ) # Making the call to the API try : friends_ids = api . followers_ids ( query ) except : return [ ] """res = [] # Extracting the information from each profile for a in aux: us= self.getUser(a) res.append(self._processUser(us))""" return friends_ids
Method to get the followers of a user .
125
9
237,352
def get_friends ( self , query ) : # Connecting to the API api = self . _connectToAPI ( ) # Verifying the limits of the API self . _rate_limit_status ( api = api , mode = "get_friends" ) # Making the call to the API try : friends_ids = api . friends_ids ( query ) except : return [ ] """res = [] # Extracting the information from each profile for a in aux: us= self.getUser(a) res.append(self._processUser(us))""" return friends_ids
Method to get the friends of a user .
123
9
237,353
def get_user ( self , screen_name ) : # Connecting to the API api = self . _connectToAPI ( ) # Verifying the limits of the API self . _rate_limit_status ( api = api , mode = "get_user" ) aux = [ ] try : user = api . get_user ( screen_name ) # Iterate through the results using user._json aux . append ( user . _json ) except tweepy . error . TweepError as e : pass res = [ ] # Extracting the information from each profile for a in aux : res . append ( self . _processUser ( a ) ) return res
Method to perform the usufy searches .
140
9
237,354
def search_users ( self , query , n = 20 , maxUsers = 60 ) : # Connecting to the API api = self . _connectToAPI ( ) # Verifying the limits of the API self . _rate_limit_status ( api = api , mode = "search_users" ) aux = [ ] page = 0 # print "Getting page %s of new users..." % page+1 # Making the call to the API try : newUsers = api . search_users ( query , n , page ) for n in newUsers : aux . append ( n . _json ) #keep grabbing tweets until there are no tweets left to grab while len ( aux ) < maxUsers & len ( newUsers ) > 0 : page += 1 print "Getting page %s of new users..." % page # Grabbing new Users newUsers = api . search_users ( query , n , page ) # Save the users found aux . extend ( newUsers ) except : pass res = [ ] # Extracting the information from each profile for a in aux : res . append ( self . _processUser ( a ) ) return res
Method to perform the searchfy searches .
236
8
237,355
def validate_categories ( categories ) : if not set ( categories ) <= Source . categories : invalid = list ( set ( categories ) - Source . categories ) raise ValueError ( 'Invalid categories: %s' % invalid )
Take an iterable of source categories and raise ValueError if some of them are invalid .
47
18
237,356
def checkIfHashIsCracked ( hash = None ) : apiURL = "http://md5db.net/api/" + str ( hash ) . lower ( ) try : # Getting the result of the query from MD5db.net data = urllib2 . urlopen ( apiURL ) . read ( ) return data except : # No information was found, then we return a null entity return [ ]
Method that checks if the given hash is stored in the md5db . net website .
87
18
237,357
def _prepare_filtering_params ( domain = None , category = None , sponsored_source = None , has_field = None , has_fields = None , query_params_match = None , query_person_match = None , * * kwargs ) : if query_params_match not in ( None , True ) : raise ValueError ( 'query_params_match can only be `True`' ) if query_person_match not in ( None , True ) : raise ValueError ( 'query_person_match can only be `True`' ) params = [ ] if domain is not None : params . append ( 'domain:%s' % domain ) if category is not None : Source . validate_categories ( [ category ] ) params . append ( 'category:%s' % category ) if sponsored_source is not None : params . append ( 'sponsored_source:%s' % sponsored_source ) if query_params_match is not None : params . append ( 'query_params_match' ) if query_person_match is not None : params . append ( 'query_person_match' ) has_fields = has_fields or [ ] if has_field is not None : has_fields . append ( has_field ) for has_field in has_fields : params . append ( 'has_field:%s' % has_field . __name__ ) return params
Transform the params to the API format return a list of params .
305
13
237,358
def validate_query_params ( self , strict = True ) : if not ( self . api_key or default_api_key ) : raise ValueError ( 'API key is missing' ) if strict and self . query_params_mode not in ( None , 'and' , 'or' ) : raise ValueError ( 'query_params_match should be one of "and"/"or"' ) if not self . person . is_searchable : raise ValueError ( 'No valid name/username/phone/email in request' ) if strict and self . person . unsearchable_fields : raise ValueError ( 'Some fields are unsearchable: %s' % self . person . unsearchable_fields )
Check if the request is valid and can be sent raise ValueError if not . strict is a boolean argument that defaults to True which means an exception is raised on every invalid query parameter if set to False an exception is raised only when the search request cannot be performed because required query params are missing .
154
59
237,359
def group_records_by_domain ( self ) : key_function = lambda record : record . source . domain return self . group_records ( key_function )
Return the records grouped by the domain they came from . The return value is a dict a key in this dict is a domain and the value is a list of all the records with this domain .
37
39
237,360
def group_records_by_category ( self ) : Source . validate_categories ( categories ) key_function = lambda record : record . source . category return self . group_records ( key_function )
Return the records grouped by the category of their source . The return value is a dict a key in this dict is a category and the value is a list of all the records with this category .
46
39
237,361
def from_dict ( d ) : warnings_ = d . get ( 'warnings' , [ ] ) query = d . get ( 'query' ) or None if query : query = Person . from_dict ( query ) person = d . get ( 'person' ) or None if person : person = Person . from_dict ( person ) records = d . get ( 'records' ) if records : records = [ Record . from_dict ( record ) for record in records ] suggested_searches = d . get ( 'suggested_searches' ) if suggested_searches : suggested_searches = [ Record . from_dict ( record ) for record in suggested_searches ] return SearchAPIResponse ( query = query , person = person , records = records , suggested_searches = suggested_searches , warnings_ = warnings_ )
Transform the dict to a response object and return the response .
190
12
237,362
def to_dict ( self ) : d = { } if self . warnings : d [ 'warnings' ] = self . warnings if self . query is not None : d [ 'query' ] = self . query . to_dict ( ) if self . person is not None : d [ 'person' ] = self . person . to_dict ( ) if self . records : d [ 'records' ] = [ record . to_dict ( ) for record in self . records ] if self . suggested_searches : d [ 'suggested_searches' ] = [ record . to_dict ( ) for record in self . suggested_searches ] return d
Return a dict representation of the response .
146
8
237,363
def from_dict ( cls , d ) : kwargs = { } for key , val in d . iteritems ( ) : if key . startswith ( 'display' ) : # includes phone.display_international continue if key . startswith ( '@' ) : key = key [ 1 : ] if key == 'type' : key = 'type_' elif key == 'valid_since' : val = str_to_datetime ( val ) elif key == 'date_range' : val = DateRange . from_dict ( val ) kwargs [ key . encode ( 'ascii' ) ] = val return cls ( * * kwargs )
Transform the dict to a field object and return the field .
150
12
237,364
def to_dict ( self ) : d = { } if self . valid_since is not None : d [ '@valid_since' ] = datetime_to_str ( self . valid_since ) for attr_list , prefix in [ ( self . attributes , '@' ) , ( self . children , '' ) ] : for attr in attr_list : value = getattr ( self , attr ) if isinstance ( value , Serializable ) : value = value . to_dict ( ) if value or isinstance ( value , ( bool , int , long ) ) : d [ prefix + attr ] = value if hasattr ( self , 'display' ) and self . display : d [ 'display' ] = self . display return d
Return a dict representation of the field .
164
8
237,365
def is_searchable ( self ) : first = alpha_chars ( self . first or u'' ) last = alpha_chars ( self . last or u'' ) raw = alpha_chars ( self . raw or u'' ) return ( len ( first ) >= 2 and len ( last ) >= 2 ) or len ( raw ) >= 4
A bool value that indicates whether the name is a valid name to search by .
74
16
237,366
def is_searchable ( self ) : return self . raw or ( self . is_valid_country and ( not self . state or self . is_valid_state ) )
A bool value that indicates whether the address is a valid address to search by .
38
16
237,367
def is_valid_state ( self ) : return self . is_valid_country and self . country . upper ( ) in STATES and self . state is not None and self . state . upper ( ) in STATES [ self . country . upper ( ) ]
A bool value that indicates whether the object s state is a valid state code .
54
16
237,368
def to_dict ( self ) : d = Field . to_dict ( self ) if self . display_international : d [ 'display_international' ] = self . display_international return d
Extend Field . to_dict take the display_international attribute .
41
14
237,369
def is_valid_email ( self ) : return bool ( self . address and Email . re_email . match ( self . address ) )
A bool value that indicates whether the address is a valid email address . Note that the check is done be matching to the regular expression at Email . re_email which is very basic and far from covering end - cases ...
30
44
237,370
def age ( self ) : if self . date_range is None : return dob = self . date_range . middle today = datetime . date . today ( ) if ( today . month , today . day ) < ( dob . month , dob . day ) : return today . year - dob . year - 1 else : return today . year - dob . year
int the estimated age of the person . Note that A DOB object is based on a date - range and the exact date is usually unknown so for age calculation the the middle of the range is assumed to be the real date - of - birth .
81
50
237,371
def age_range ( self ) : if self . date_range is None : return None , None start_date = DateRange ( self . date_range . start , self . date_range . start ) end_date = DateRange ( self . date_range . end , self . date_range . end ) start_age = DOB ( date_range = end_date ) . age end_age = DOB ( date_range = start_date ) . age return start_age , end_age
A tuple of two ints - the minimum and maximum age of the person .
109
16
237,372
def from_age_range ( start_age , end_age ) : if start_age < 0 or end_age < 0 : raise ValueError ( 'start_age and end_age can\'t be negative' ) if start_age > end_age : start_age , end_age = end_age , start_age today = datetime . date . today ( ) try : start_date = today . replace ( year = today . year - end_age - 1 ) except ValueError : # February 29 start_date = today . replace ( year = today . year - end_age - 1 , day = 28 ) start_date += datetime . timedelta ( days = 1 ) try : end_date = today . replace ( year = today . year - start_age ) except ValueError : # February 29 end_date = today . replace ( year = today . year - start_age , day = 28 ) date_range = DateRange ( start_date , end_date ) return DOB ( date_range = date_range )
Take a person s minimal and maximal age and return a new DOB object suitable for him .
225
19
237,373
def from_dict ( cls , d ) : relationship = super ( cls , cls ) . from_dict ( d ) if relationship . name is not None : relationship . name = Name . from_dict ( relationship . name ) return relationship
Extend Field . from_dict and also load the name from the dict .
52
16
237,374
def from_dict ( d ) : start = d . get ( 'start' ) end = d . get ( 'end' ) if not ( start and end ) : raise ValueError ( 'DateRange must have both start and end' ) start = str_to_date ( start ) end = str_to_date ( end ) return DateRange ( start , end )
Transform the dict to a DateRange object .
79
9
237,375
def to_dict ( self ) : d = { } d [ 'start' ] = date_to_str ( self . start ) d [ 'end' ] = date_to_str ( self . end ) return d
Transform the date - range to a dict .
48
9
237,376
def enumerateURL ( urlDict , outputFolder , startIndex = 0 , maxErrors = 100 ) : for i , url in enumerate ( urlDict . keys ( ) ) : # Grabbing domain name: domain = re . findall ( "://(.*)/" , url ) [ 0 ] # Defining the starting index index = startIndex # The app will stop when this value reaches maxErrors consecutiveErrors = 0 i3Browser = browser . Browser ( ) # Main loop that checks if the maximum number of errors has been reached while consecutiveErrors <= maxErrors : # creating the new URL to download newQuery = url . replace ( "<INDEX>" , str ( index ) ) print ( newQuery ) # Downloading the file try : data = i3Browser . recoverURL ( newQuery ) filename = domain . replace ( "/" , "|" ) + "_" + "-profile_" + str ( index ) . rjust ( 10 , "0" ) + ".html" if urlDict [ url ] != None : if urlDict [ url ] in data : print ( general . info ( "Storing resource as:\t" + filename + "..." ) ) # The profile was found so we will store it: with open ( outputFolder + "/" + filename , "w" ) as oF : oF . write ( data ) else : # The profile was found so we will store it: print ( general . info ( "Storing resource as:\t" + filename + "..." ) ) with open ( outputFolder + "/" + filename , "w" ) as oF : oF . write ( data ) except : pass #logger.error("The resource could not be downloaded.") index += 1
Function that performs the enumeration itself .
370
8
237,377
def checkIfEmailWasHacked ( email = None , sleepSeconds = 1 ) : # Sleeping just a little bit time . sleep ( sleepSeconds ) print ( "\t[*] Bypassing Cloudflare Restriction..." ) ua = 'osrframework 0.18' useragent = { 'User-Agent' : ua } cookies , user_agent = cfscrape . get_tokens ( 'https://haveibeenpwned.com/api/v2/breachedaccount/test@example.com' , user_agent = ua ) leaks = [ ] apiURL = "https://haveibeenpwned.com/api/v2/breachedaccount/{}" . format ( email ) # Accessing the HIBP API time . sleep ( sleepSeconds ) # Building API query data = requests . get ( apiURL , headers = useragent , cookies = cookies , verify = True ) . text # Reading the text data onto python structures try : jsonData = json . loads ( data ) for e in jsonData : # Building the i3visio like structure new = { } new [ "value" ] = "(HIBP) " + e . get ( "Name" ) + " - " + email new [ "type" ] = "i3visio.profile" new [ "attributes" ] = [ { "type" : "i3visio.platform_leaked" , "value" : e . get ( "Name" ) , "attributes" : [ ] } , { "type" : "@source" , "value" : "haveibeenpwned.com" , "attributes" : [ ] } , { "type" : "@source_uri" , "value" : apiURL , "attributes" : [ ] } , { "type" : "@pwn_count" , "value" : e . get ( "PwnCount" ) , "attributes" : [ ] } , { "type" : "@added_date" , "value" : e . get ( "AddedDate" ) , "attributes" : [ ] } , { "type" : "@breach_date" , "value" : e . get ( "BreachDate" ) , "attributes" : [ ] } , { "type" : "@description" , "value" : e . get ( "Description" ) , "attributes" : [ ] } ] + general . expandEntitiesFromEmail ( email ) leaks . append ( new ) except ValueError : return [ ] except Exception : print ( "ERROR: Something happenned when using HIBP API." ) return [ ] return leaks
Method that checks if the given email is stored in the HIBP website .
578
16
237,378
def get_page ( url ) : request = Request ( url ) request . add_header ( 'User-Agent' , 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0)' ) cookie_jar . add_cookie_header ( request ) response = urlopen ( request ) cookie_jar . extract_cookies ( response , request ) html = response . read ( ) response . close ( ) cookie_jar . save ( ) return html
Request the given URL and return the response page using the cookie jar .
103
14
237,379
def search ( query , tld = 'com' , lang = 'en' , num = 10 , start = 0 , stop = None , pause = 2.0 , only_standard = False ) : # Lazy import of BeautifulSoup. # Try to use BeautifulSoup 4 if available, fall back to 3 otherwise. global BeautifulSoup if BeautifulSoup is None : try : from bs4 import BeautifulSoup except ImportError : from BeautifulSoup import BeautifulSoup # Set of hashes for the results found. # This is used to avoid repeated results. hashes = set ( ) # Prepare the search string. query = quote_plus ( query ) # Grab the cookie from the home page. get_page ( url_home % vars ( ) ) # Prepare the URL of the first request. if start : if num == 10 : url = url_next_page % vars ( ) else : url = url_next_page_num % vars ( ) else : if num == 10 : url = url_search % vars ( ) else : url = url_search_num % vars ( ) # Loop until we reach the maximum result, if any (otherwise, loop forever). while not stop or start < stop : # Sleep between requests. time . sleep ( pause ) # Request the Google Search results page. html = get_page ( url ) # Parse the response and process every anchored URL. soup = BeautifulSoup ( html ) anchors = soup . find ( id = 'search' ) . findAll ( 'a' ) for a in anchors : # Leave only the "standard" results if requested. # Otherwise grab all possible links. if only_standard and ( not a . parent or a . parent . name . lower ( ) != "h3" ) : continue # Get the URL from the anchor tag. try : link = a [ 'href' ] except KeyError : continue # Filter invalid links and links pointing to Google itself. link = filter_result ( link ) if not link : continue # Discard repeated results. h = hash ( link ) if h in hashes : continue hashes . add ( h ) # Yield the result. yield link # End if there are no more results. if not soup . find ( id = 'nav' ) : break # Prepare the URL for the next request. start += num if num == 10 : url = url_next_page % vars ( ) else : url = url_next_page_num % vars ( )
Search the given query string using Google .
528
8
237,380
def add_fields ( self , fields ) : for field in fields : cls = field . __class__ try : container = FieldsContainer . class_container [ cls ] except KeyError : raise ValueError ( 'Object of type %s is an invalid field' % cls ) getattr ( self , container ) . append ( field )
Add the fields to their corresponding container . fields is an iterable of field objects from osrframework . thirdparties . pipl_com . lib . fields .
72
34
237,381
def all_fields ( self ) : return [ field for container in FieldsContainer . class_container . values ( ) for field in getattr ( self , container ) ]
A list with all the fields contained in this object .
35
11
237,382
def fields_from_dict ( d ) : class_container = FieldsContainer . class_container fields = [ field_cls . from_dict ( field_dict ) for field_cls , container in class_container . iteritems ( ) for field_dict in d . get ( container , [ ] ) ] return fields
Load the fields from the dict return a list with all the fields .
69
14
237,383
def fields_to_dict ( self ) : d = { } for container in FieldsContainer . class_container . values ( ) : fields = getattr ( self , container ) if fields : d [ container ] = [ field . to_dict ( ) for field in fields ] return d
Transform the object to a dict and return the dict .
60
11
237,384
def from_dict ( d ) : query_params_match = d . get ( '@query_params_match' ) query_person_match = d . get ( '@query_person_match' ) valid_since = d . get ( '@valid_since' ) if valid_since : valid_since = str_to_datetime ( valid_since ) source = Source . from_dict ( d . get ( 'source' , { } ) ) fields = Record . fields_from_dict ( d ) return Record ( source = source , fields = fields , query_params_match = query_params_match , query_person_match = query_person_match , valid_since = valid_since )
Transform the dict to a record object and return the record .
156
12
237,385
def to_dict ( self ) : d = { } if self . query_params_match is not None : d [ '@query_params_match' ] = self . query_params_match if self . query_person_match is not None : d [ '@query_person_match' ] = self . query_person_match if self . valid_since is not None : d [ '@valid_since' ] = datetime_to_str ( self . valid_since ) if self . source is not None : d [ 'source' ] = self . source . to_dict ( ) d . update ( self . fields_to_dict ( ) ) return d
Return a dict representation of the record .
147
8
237,386
def is_searchable ( self ) : filter_func = lambda field : field . is_searchable return bool ( filter ( filter_func , self . names ) or filter ( filter_func , self . emails ) or filter ( filter_func , self . phones ) or filter ( filter_func , self . usernames ) )
A bool value that indicates whether the person has enough data and can be sent as a query to the API .
71
22
237,387
def from_dict ( d ) : query_params_match = d . get ( '@query_params_match' ) sources = [ Source . from_dict ( source ) for source in d . get ( 'sources' , [ ] ) ] fields = Person . fields_from_dict ( d ) return Person ( fields = fields , sources = sources , query_params_match = query_params_match )
Transform the dict to a person object and return the person .
89
12
237,388
def to_dict ( self ) : d = { } if self . query_params_match is not None : d [ '@query_params_match' ] = self . query_params_match if self . sources : d [ 'sources' ] = [ source . to_dict ( ) for source in self . sources ] d . update ( self . fields_to_dict ( ) ) return d
Return a dict representation of the person .
87
8
237,389
def processPhoneList ( platformNames = [ ] , numbers = [ ] , excludePlatformNames = [ ] ) : # Grabbing the <Platform> objects platforms = platform_selection . getPlatformsByName ( platformNames , mode = "phonefy" , excludePlatformNames = excludePlatformNames ) results = [ ] for num in numbers : for pla in platforms : # This returns a json.txt! entities = pla . getInfo ( query = num , process = True , mode = "phonefy" ) if entities != { } : results += json . loads ( entities ) return results
Method to perform searchs on a series of numbers .
124
11
237,390
def createURL ( self , word , mode = "phonefy" ) : try : return self . modes [ mode ] [ "url" ] . format ( placeholder = urllib . pathname2url ( word ) ) except : if mode == "base" : if word [ 0 ] == "/" : return self . baseURL + word [ 1 : ] , word else : return self . baseURL + word else : try : return self . url [ mode ] . replace ( "<" + mode + ">" , urllib . pathname2url ( word ) ) except : pass return None
Method to create the URL replacing the word in the appropriate URL .
126
13
237,391
def launchQueryForMode ( self , query = None , mode = None ) : # Creating the query URL for that mode qURL = self . createURL ( word = query , mode = mode ) i3Browser = browser . Browser ( ) try : # Check if it needs creds if self . needsCredentials [ mode ] : self . _getAuthenticated ( i3Browser , qURL ) data = i3Browser . recoverURL ( qURL ) else : # Accessing the resources data = i3Browser . recoverURL ( qURL ) return data except KeyError : print ( general . error ( "[*] '{}' is not a valid mode for this wrapper ({})." . format ( mode , self . __class__ . __name__ ) ) ) return None
Method that launches an i3Browser to collect data .
164
11
237,392
def getInfo ( self , query = None , process = False , mode = "phonefy" , qURI = None ) : results = [ ] data = "" if self . _modeIsValid ( mode = mode ) and self . _isValidQuery ( query , mode = mode ) : if mode in [ "mailfy" , "phonefy" , "searchfy" , "usufy" ] : try : results = getattr ( self , "do_{}" . format ( mode ) ) ( query ) except AttributeError as e : raise NotImplementedModeError ( str ( self ) , mode ) return json . dumps ( results )
Method that checks the presence of a given query and recovers the first list of complains .
138
17
237,393
def _modeIsValid ( self , mode ) : try : # Suport for version 2 of wrappers return mode in self . modes . keys ( ) except AttributeError as e : # Legacy for mantaining old wrappers if mode in self . isValidMode . keys ( ) : if mode in self . isValidMode . keys ( ) : return True return False
Verification of whether the mode is a correct option to be used .
77
14
237,394
def _getAuthenticated ( self , browser , url ) : # check if we have creds try : if len ( self . creds ) > 0 : # TODO: in choosing a cred there is an uneeded nesting of arrays c = random . choice ( self . creds ) [ 0 ] # adding the credential browser . setNewPassword ( url , c . user , c . password ) return True else : raise NoCredentialsException ( str ( self ) ) except AttributeError as e : raise BadImplementationError ( str ( e ) )
Getting authenticated .
118
3
237,395
def _isValidQuery ( self , query , mode = "phonefy" ) : try : # Suport for version 2 of wrappers validator = self . modes [ mode ] . get ( "query_validator" ) if validator : try : compiledRegexp = re . compile ( "^{expr}$" . format ( expr = validator ) ) return compiledRegexp . match ( query ) except AttributeError as e : return True except AttributeError as e : # Legacy for mantaining old wrappers compiledRegexp = re . compile ( "^{r}$" . format ( r = self . validQuery [ mode ] ) ) return compiledRegexp . match ( query )
Method to verify if a given query is processable by the platform .
151
14
237,396
def _somethingFound ( self , data , mode = "phonefy" ) : if data : try : for text in self . notFoundText [ mode ] : if text in data : return False return True except AttributeError as e : # Update to version 2 of the wrappers. verifier = self . modes . get ( mode ) if verifier : if verifier . get ( "not_found_text" , "" ) in data : return False else : return True return False
Verifying if something was found .
102
7
237,397
def do_phonefy ( self , query , * * kwargs ) : results = [ ] test = self . check_phonefy ( query , kwargs ) if test : r = { "type" : "i3visio.phone" , "value" : self . platformName + " - " + query , "attributes" : [ ] } try : aux = { "type" : "i3visio.uri" , "value" : self . createURL ( query , mode = "phonefy" ) , "attributes" : [ ] } r [ "attributes" ] . append ( aux ) except : pass aux = { "type" : "i3visio.platform" , "value" : self . platformName , "attributes" : [ ] } r [ "attributes" ] . append ( aux ) # V2 of the wrappers r [ "attributes" ] += self . process_phonefy ( test ) results . append ( r ) return results
Verifying a phonefy query in this platform .
215
10
237,398
def process_usufy ( self , data ) : mode = "usufy" info = [ ] try : # v2 verifier = self . modes . get ( mode , { } ) . get ( "extra_fields" , { } ) for field in verifier . keys ( ) : regexp = verifier [ field ] values = re . findall ( regexp , data ) for val in values : aux = { } aux [ "type" ] = field aux [ "value" ] = val aux [ "attributes" ] = [ ] if aux not in info : info . append ( aux ) except AttributeError as e : # Legacy for field in self . fieldsRegExp [ mode ] . keys ( ) : # Recovering the RegularExpression try : # Using the old approach of "Start" + "End" regexp = self . fieldsRegExp [ mode ] [ field ] [ "start" ] + "([^\)]+)" + self . fieldsRegExp [ mode ] [ field ] [ "end" ] tmp = re . findall ( regexp , data ) # Now we are performing an operation just in case the "end" tag is found in the results, which would mean that the tag selected matches something longer in the data. values = [ ] for t in tmp : if self . fieldsRegExp [ mode ] [ field ] [ "end" ] in t : values . append ( t . split ( self . fieldsRegExp [ mode ] [ field ] [ "end" ] ) [ 0 ] ) else : values . append ( t ) except : # Using the compact approach if start and end tags do not exist. regexp = self . fieldsRegExp [ mode ] [ field ] values = re . findall ( regexp , data ) for val in values : aux = { } aux [ "type" ] = field aux [ "value" ] = val aux [ "attributes" ] = [ ] if aux not in info : info . append ( aux ) return info
Method to process and extract the entities of a usufy
426
12
237,399
def doBenchmark ( plats ) : logger = logging . getLogger ( "osrframework.utils" ) # defining the results dict res = { } # args args = [ ] #for p in plats: # args.append( (str(p),) ) # selecting the number of tries to be performed tries = [ 1 , 4 , 8 , 16 , 24 , 32 , 40 , 48 , 56 , 64 ] #for i in range(1, len(plats)/10): # tries.append(i*10) logger . info ( "The test is starting recovering webpages by creating the following series of threads: " + str ( tries ) ) for i in tries : print "Testing creating " + str ( i ) + " simultaneous threads..." # starting t0 = time . clock ( ) pool = Pool ( i ) # We call the wrapping function with all the args previously generated poolResults = pool . map ( multi_run_wrapper , args ) t1 = time . clock ( ) # storing the results res [ i ] = t1 - t0 print str ( i ) + "\t" + str ( res [ i ] ) + "\n" return res
Perform the benchmark ...
250
5