idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
26,200
def main(host='localhost', port=8086):
    """Instantiate a connection to InfluxDB and exercise the basic API.

    Creates a database and retention policy, writes one point, runs two
    queries (one using bind parameters), then drops the database again.

    :param host: InfluxDB server hostname.
    :param port: InfluxDB HTTP API port.
    """
    user = 'root'
    password = 'root'
    dbname = 'example'
    dbuser = 'smly'
    dbuser_password = 'my_secret_password'
    query = 'select Float_value from cpu_load_short;'
    # $host is substituted server-side from bind_params.
    query_where = 'select Int_value from cpu_load_short where host=$host;'
    bind_params = {'host': 'server01'}
    json_body = [
        {
            "measurement": "cpu_load_short",
            "tags": {
                "host": "server01",
                "region": "us-west"
            },
            "time": "2009-11-10T23:00:00Z",
            "fields": {
                "Float_value": 0.64,
                "Int_value": 3,
                "String_value": "Text",
                "Bool_value": True
            }
        }
    ]

    # NOTE(review): InfluxDBClient is assumed to be imported elsewhere in
    # this file -- not visible in this chunk.
    client = InfluxDBClient(host, port, user, password, dbname)

    print("Create database: " + dbname)
    client.create_database(dbname)

    print("Create a retention policy")
    client.create_retention_policy('awesome_policy', '3d', 3, default=True)

    print("Switch user: " + dbuser)
    client.switch_user(dbuser, dbuser_password)

    print("Write points: {0}".format(json_body))
    client.write_points(json_body)

    print("Querying data: " + query)
    result = client.query(query)
    print("Result: {0}".format(result))

    print("Querying data: " + query_where)
    result = client.query(query_where, bind_params=bind_params)
    print("Result: {0}".format(result))

    # Switch back to the admin user before dropping the database.
    print("Switch user: " + user)
    client.switch_user(user, password)

    print("Drop database: " + dbname)
    client.drop_database(dbname)
Instantiate a connection to the InfluxDB .
26,201
def main(uport):
    """Send a UDP packet of two points to InfluxDB on the given UDP port."""
    packet = {
        "tags": {"host": "server01", "region": "us-west"},
        "time": "2009-11-10T23:00:00Z",
        "points": [
            {"measurement": "cpu_load_short", "fields": {"value": 0.64}},
            {"measurement": "cpu_load_short", "fields": {"value": 0.67}},
        ],
    }
    udp_client = InfluxDBClient(use_udp=True, udp_port=uport)
    udp_client.send_packet(packet)
Instantiate connection to the InfluxDB .
26,202
def _json_body_(cls):
    """Return the JSON body of the datapoints registered on this class."""
    body = []
    for series_name, points in six.iteritems(cls._datapoints):
        for point in points:
            entry = {
                "measurement": series_name,
                "fields": {},
                "tags": {},
                "time": getattr(point, "time"),
            }
            # Only non-None field values are serialized.
            for field in cls._fields:
                field_value = getattr(point, field)
                if field_value is not None:
                    entry['fields'][field] = field_value
            for tag in cls._tags:
                entry['tags'][tag] = getattr(point, tag)
            body.append(entry)
    return body
Return the JSON body of given datapoints .
26,203
def main(host='localhost', port=8086, nb_day=15):
    """Write nb_day days of random CPU-idle samples, query, then clean up.

    :param host: InfluxDB server hostname.
    :param port: InfluxDB HTTP API port.
    :param nb_day: number of days of 5-minute samples to generate.
    """
    # BUG FIX: the original body immediately re-assigned ``nb_day = 15``,
    # silently ignoring the caller-supplied value. That line is removed.
    timeinterval_min = 5  # sample spacing in minutes
    total_minutes = 1440 * nb_day
    total_records = int(total_minutes / timeinterval_min)
    now = datetime.datetime.today()
    metric = "server_data.cpu_idle"

    series = []
    for i in range(0, total_records):
        past_date = now - datetime.timedelta(minutes=i * timeinterval_min)
        value = random.randint(0, 200)
        hostName = "server-%d" % random.randint(1, 5)
        # NOTE: '%s' strftime (epoch seconds) is platform-dependent; kept as-is.
        pointValues = {
            "time": int(past_date.strftime('%s')),
            "measurement": metric,
            "fields": {
                "value": value,
            },
            "tags": {
                "hostName": hostName,
            },
        }
        series.append(pointValues)
    print(series)

    client = InfluxDBClient(host, port, USER, PASSWORD, DBNAME)

    print("Create database: " + DBNAME)
    try:
        client.create_database(DBNAME)
    except InfluxDBClientError:
        # Database already exists: recreate it from scratch.
        client.drop_database(DBNAME)
        client.create_database(DBNAME)

    print("Create a retention policy")
    retention_policy = 'server_data'
    client.create_retention_policy(retention_policy, '3d', 3, default=True)

    print("Write points #: {0}".format(total_records))
    client.write_points(series, retention_policy=retention_policy)

    # Give the server a moment to index the freshly written points.
    time.sleep(2)

    query = "SELECT MEAN(value) FROM {} WHERE \
time > now() - 10d GROUP BY time(500m)".format(metric)
    result = client.query(query, database=DBNAME)
    print(result)
    print("Result: {0}".format(result))

    print("Drop database: {}".format(DBNAME))
    client.drop_database(DBNAME)
Instantiate a connection to the backend .
26,204
def main(host='localhost', port=8086):
    """Generate one period of a sine wave and round-trip it through InfluxDB."""
    now = datetime.datetime.today()
    points = [
        {
            "measurement": 'foobar',
            "time": int(now.strftime('%s')) + angle,
            "fields": {"value": 10 + math.sin(math.radians(angle)) * 10},
        }
        for angle in range(0, 360)
    ]

    client = InfluxDBClient(host, port, USER, PASSWORD, DBNAME)
    print("Create database: " + DBNAME)
    client.create_database(DBNAME)
    client.switch_database(DBNAME)
    client.write_points(points)
    # Let the server settle before querying the freshly written series.
    time.sleep(3)

    query = 'SELECT * FROM foobar'
    print("Querying data: " + query)
    result = client.query(query, database=DBNAME)
    print("Result: {0}".format(result))
    print("Delete database: " + DBNAME)
    client.drop_database(DBNAME)
Define function to generate the sin wave .
26,205
def get_points(self, measurement=None, tags=None):
    """Return a generator over all points matching the given filters.

    :param measurement: series name to filter by, or None for all series.
    :param tags: dict of tag values a point (or its series) must match,
        or None to disable tag filtering.
    :raises TypeError: if measurement is neither a str nor None.
    """
    # type(b''.decode()) is the native str type on both Python 2 and 3.
    if not isinstance(measurement, (bytes, type(b''.decode()), type(None))):
        raise TypeError('measurement must be an str or None')

    for series in self._get_series():
        series_name = series.get('measurement', series.get('name', 'results'))
        if series_name is None:
            # NOTE(review): only reachable when a series explicitly carries
            # measurement/name == None (the get() default is 'results');
            # tag filtering suppresses all points on this path -- confirm.
            if tags is None:
                for item in self._get_points_for_series(series):
                    yield item
        elif measurement in (None, series_name):
            series_tags = series.get('tags', {})
            for item in self._get_points_for_series(series):
                # A point matches if its own tags or its series' tags match.
                if tags is None or self._tag_matches(item, tags) or self._tag_matches(series_tags, tags):
                    yield item
Return a generator for all the points that match the given filters .
26,206
def keys(self):
    """Return the list of (measurement, tags) keys in the ResultSet."""
    return [
        (series.get('measurement', series.get('name', 'results')),
         series.get('tags', None))
        for series in self._get_series()
    ]
Return the list of keys in the ResultSet .
26,207
def items(self):
    """Return (key, point-generator) pairs for every series in the ResultSet."""
    pairs = []
    for series in self._get_series():
        series_key = (
            series.get('measurement', series.get('name', 'results')),
            series.get('tags', None),
        )
        pairs.append((series_key, self._get_points_for_series(series)))
    return pairs
Return the set of items from the ResultSet .
26,208
def _get_points_for_series ( self , series ) : for point in series . get ( 'values' , [ ] ) : yield self . point_from_cols_vals ( series [ 'columns' ] , point )
Return generator of dict from columns and values of a series .
26,209
def point_from_cols_vals(cols, vals):
    """Create a dict from parallel column-name and value lists."""
    # Indexing (rather than zip) preserves the original IndexError when
    # vals is shorter than cols.
    return {name: vals[idx] for idx, name in enumerate(cols)}
Create a dict from columns and values lists .
26,210
def from_dsn(dsn, **kwargs):
    r"""Return an instance of InfluxDBClient built from a data source name.

    Supported schemes: influxdb, udp+influxdb, https+influxdb.
    Extra keyword arguments override anything parsed from the DSN.
    """
    init_args = {}
    conn_params = urlparse(dsn)
    scheme_info = conn_params.scheme.split('+')

    if len(scheme_info) == 1:
        scheme, modifier = scheme_info[0], None
    else:
        modifier, scheme = scheme_info

    if scheme != 'influxdb':
        raise ValueError('Unknown scheme "{0}".'.format(scheme))

    if modifier:
        if modifier == 'udp':
            init_args['use_udp'] = True
        elif modifier == 'https':
            init_args['ssl'] = True
        else:
            raise ValueError('Unknown modifier "{0}".'.format(modifier))

    if conn_params.hostname:
        init_args['host'] = conn_params.hostname
    if conn_params.port:
        init_args['port'] = conn_params.port
    if conn_params.username:
        init_args['username'] = conn_params.username
    if conn_params.password:
        init_args['password'] = conn_params.password
    # Path "/dbname" -> database "dbname".
    if conn_params.path and len(conn_params.path) > 1:
        init_args['database'] = conn_params.path[1:]

    init_args.update(kwargs)
    return InfluxDBClient(**init_args)
Return an instance of InfluxDBClient from a given data source name.
26,211
def request(self, url, method='GET', params=None, data=None, expected_response_code=200):
    """Make an HTTP request to the InfluxDB API.

    Retries on connection errors/timeouts up to self._retries attempts
    (0 means raise immediately).

    :param url: endpoint path, appended to the client base URL.
    :param method: HTTP verb.
    :param params: query parameters; auth credentials are merged in.
    :param data: request body; non-str payloads are JSON-encoded.
    :param expected_response_code: status code treated as success.
    :raises InfluxDBClientError: on any other status code.
    """
    url = "{0}/{1}".format(self._baseurl, url)
    if params is None:
        params = {}
    # Credentials travel as query parameters (legacy API style).
    auth = {'u': self._username, 'p': self._password}
    params.update(auth)
    if data is not None and not isinstance(data, str):
        data = json.dumps(data)
    retry = True
    _try = 0
    while retry:
        try:
            response = session.request(method=method, url=url, params=params, data=data, headers=self._headers, verify=self._verify_ssl, timeout=self._timeout)
            break
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
            _try += 1
            if self._retries != 0:
                retry = _try < self._retries
            else:
                raise requests.exceptions.ConnectionError
    # NOTE(review): if retries are exhausted (retry becomes False) the loop
    # exits with `response` unbound, raising UnboundLocalError here -- the
    # upstream code has the same hazard; confirm before changing behavior.
    if response.status_code == expected_response_code:
        return response
    else:
        raise InfluxDBClientError(response.content, response.status_code)
Make a http request to API .
26,212
def write(self, data):
    """POST *data* to the /write endpoint (InfluxDB v0.9.0 convenience).

    Always returns True; request() raises on failure.
    """
    self.request(
        url="write",
        method='POST',
        params=None,
        data=data,
        expected_response_code=200,
    )
    return True
Provided as a convenience for InfluxDB v0.9.0; this may change.
26,213
def delete_points(self, name):
    """Delete the entire series *name* from the current database."""
    self.request(
        url="db/{0}/series/{1}".format(self._database, name),
        method='DELETE',
        expected_response_code=204,
    )
    return True
Delete an entire series .
26,214
def query(self, query, time_precision='s', chunked=False):
    """Query data from the InfluxDB v0.8 database (delegates to _query)."""
    result = self._query(query, time_precision=time_precision, chunked=chunked)
    return result
Query data from the influxdb v0 . 8 database .
26,215
def create_database(self, database):
    """Create *database* on the InfluxDB server; return True on success."""
    self.request(
        url="db",
        method='POST',
        data={'name': database},
        expected_response_code=201,
    )
    return True
Create a database on the InfluxDB server .
26,216
def delete_database(self, database):
    """Drop *database* from the InfluxDB server; return True on success."""
    self.request(
        url="db/{0}".format(database),
        method='DELETE',
        expected_response_code=204,
    )
    return True
Drop a database on the InfluxDB server .
26,217
def get_list_database(self):
    """Return the list of databases as decoded JSON."""
    return self.request(url="db", method='GET', expected_response_code=200).json()
Get the list of databases .
26,218
def delete_series(self, series):
    """Drop *series* from the current database; return True on success."""
    self.request(
        url="db/{0}/series/{1}".format(self._database, series),
        method='DELETE',
        expected_response_code=204,
    )
    return True
Drop a series on the InfluxDB server .
26,219
def get_list_cluster_admins(self):
    """Return the list of cluster admins as decoded JSON."""
    admins = self.request(
        url="cluster_admins",
        method='GET',
        expected_response_code=200,
    )
    return admins.json()
Get list of cluster admins .
26,220
def add_cluster_admin(self, new_username, new_password):
    """Create a cluster admin account; return True on success."""
    self.request(
        url="cluster_admins",
        method='POST',
        data={'name': new_username, 'password': new_password},
        expected_response_code=200,
    )
    return True
Add cluster admin .
26,221
def update_cluster_admin_password(self, username, new_password):
    """Set a new password for cluster admin *username*; return True."""
    self.request(
        url="cluster_admins/{0}".format(username),
        method='POST',
        data={'password': new_password},
        expected_response_code=200,
    )
    return True
Update cluster admin password .
26,222
def delete_cluster_admin(self, username):
    """Remove cluster admin *username*; return True on success."""
    self.request(
        url="cluster_admins/{0}".format(username),
        method='DELETE',
        expected_response_code=200,
    )
    return True
Delete cluster admin .
26,223
def alter_database_admin(self, username, is_admin):
    """Grant or revoke database-admin rights for *username*; return True."""
    self.request(
        url="db/{0}/users/{1}".format(self._database, username),
        method='POST',
        data={'admin': is_admin},
        expected_response_code=200,
    )
    return True
Alter the database admin .
26,224
def get_database_users(self):
    """Return the users of the current database as decoded JSON."""
    url = "db/{0}/users".format(self._database)
    return self.request(url=url, method='GET', expected_response_code=200).json()
Get list of database users .
26,225
def add_database_user(self, new_username, new_password, permissions=None):
    """Add a user to the current database.

    :param permissions: optional (readFrom, writeTo) pair.
    :raises TypeError: if *permissions* is not a two-item sequence.
    """
    data = {'name': new_username, 'password': new_password}
    if permissions:
        try:
            data['readFrom'], data['writeTo'] = permissions
        except (ValueError, TypeError):
            raise TypeError("'permissions' must be (readFrom, writeTo) tuple")
    self.request(
        url="db/{0}/users".format(self._database),
        method='POST',
        data=data,
        expected_response_code=200,
    )
    return True
Add database user .
26,226
def delete_database_user(self, username):
    """Remove *username* from the current database; return True."""
    self.request(
        url="db/{0}/users/{1}".format(self._database, username),
        method='DELETE',
        expected_response_code=200,
    )
    return True
Delete database user .
26,227
def send_packet(self, packet):
    """Serialize *packet* as JSON and send it over the UDP socket."""
    payload = json.dumps(packet).encode('utf-8')
    self.udp_socket.sendto(payload, (self._host, self._udp_port))
Send a UDP packet along the wire .
26,228
def query(self, query, time_precision='s', chunked=False):
    """Query InfluxDB and return the result as DataFrame(s).

    Returns the raw (empty) result when nothing matched, a single
    DataFrame for one series, and a name -> DataFrame dict otherwise.
    """
    result = InfluxDBClient.query(self, query=query, time_precision=time_precision, chunked=chunked)
    if len(result) == 0:
        return result
    if len(result) == 1:
        return self._to_dataframe(result[0], time_precision)
    frames = {}
    for time_series in result:
        frames[time_series['name']] = self._to_dataframe(time_series, time_precision)
    return frames
Query data into DataFrames .
26,229
def validate(self, value):
    """Ensure the SQL passes the blacklist, else raise ValidationError."""
    query = Query(sql=value)
    passes_blacklist, failing_words = query.passes_blacklist()
    if passes_blacklist:
        error = None
    else:
        error = MSG_FAILED_BLACKLIST % ', '.join(failing_words)
    if error:
        raise ValidationError(error, code="InvalidSql")
Ensure that the SQL passes the blacklist .
26,230
def build_schema_info(connection_alias):
    """Construct schema information via engine-specific introspection.

    :param connection_alias: name of the configured DB connection.
    :return: list of (table_name, [(column_name, field_type), ...]) pairs
        for every included table/view of the aliased connection.
    """
    connection = get_valid_connection(connection_alias)
    ret = []
    with connection.cursor() as cursor:
        tables_to_introspect = connection.introspection.table_names(cursor, include_views=_include_views())
        for table_name in tables_to_introspect:
            if not _include_table(table_name):
                continue
            td = []
            table_description = connection.introspection.get_table_description(cursor, table_name)
            for row in table_description:
                column_name = row[0]
                try:
                    field_type = connection.introspection.get_field_type(row[1], row)
                except KeyError:
                    # FIX: exception variable was bound but unused.
                    # Unknown type code: fall back to a placeholder rather
                    # than aborting the whole introspection run.
                    field_type = 'Unknown'
                td.append((column_name, field_type))
            ret.append((table_name, td))
    return ret
Construct schema information via engine - specific queries of the tables in the DB .
26,231
def creates_cycle(connections, test):
    """Return True if adding *test* to *connections* would create a cycle.

    Assumes the graph described by *connections* is already acyclic.
    """
    src, dst = test
    if src == dst:
        return True

    # Flood-fill everything reachable from dst; a cycle exists iff we can
    # reach src again.
    reachable = {dst}
    while True:
        grown = 0
        for tail, head in connections:
            if tail in reachable and head not in reachable:
                if head == src:
                    return True
                reachable.add(head)
                grown += 1
        if grown == 0:
            return False
Returns true if the addition of the test connection would create a cycle assuming that no cycle already exists in the graph represented by connections .
26,232
def run(config_file):
    """Load the NEAT config, evolve a population on XOR, show the result.

    :param config_file: path to the neat-python configuration file.
    """
    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction, neat.DefaultSpeciesSet, neat.DefaultStagnation, config_file)
    p = neat.Population(config)
    # Report progress to stdout and collect run statistics.
    p.add_reporter(neat.StdOutReporter(True))
    stats = neat.StatisticsReporter()
    p.add_reporter(stats)
    # Evaluate genomes on 4 threads for at most 300 generations.
    pe = neat.ThreadedEvaluator(4, eval_genome)
    winner = p.run(pe.evaluate, 300)
    pe.stop()
    print('\nBest genome:\n{!s}'.format(winner))
    print('\nOutput:')
    winner_net = neat.nn.FeedForwardNetwork.create(winner, config)
    for xi, xo in zip(xor_inputs, xor_outputs):
        output = winner_net.activate(xi)
        print("input {!r}, expected output {!r}, got {!r}".format(xi, xo, output))
    # Visualization is optional -- `visualize` may be None when its
    # dependencies (matplotlib/graphviz) are missing.
    if visualize is not None:
        node_names = {-1: 'A', -2: 'B', 0: 'A XOR B'}
        visualize.draw_net(config, winner, True, node_names=node_names)
        visualize.plot_stats(stats, ylog=False, view=True)
        visualize.plot_species(stats, view=True)
Load the config, create a population, evolve it, and show the result.
26,233
def plot_stats(statistics, ylog=False, view=False, filename='avg_fitness.svg'):
    """Plot the population's average and best fitness per generation.

    :param statistics: a neat StatisticsReporter with recorded generations.
    :param ylog: use a symlog y-scale when True.
    :param view: show the figure interactively after saving.
    :param filename: path the SVG plot is written to.
    """
    # `plt` is an optional module-level import that may be None.
    if plt is None:
        warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
        return

    generation = range(len(statistics.most_fit_genomes))
    best_fitness = [c.fitness for c in statistics.most_fit_genomes]
    avg_fitness = np.array(statistics.get_fitness_mean())
    stdev_fitness = np.array(statistics.get_fitness_stdev())

    plt.plot(generation, avg_fitness, 'b-', label="average")
    # One-standard-deviation band around the average.
    plt.plot(generation, avg_fitness - stdev_fitness, 'g-.', label="-1 sd")
    plt.plot(generation, avg_fitness + stdev_fitness, 'g-.', label="+1 sd")
    plt.plot(generation, best_fitness, 'r-', label="best")

    plt.title("Population's average and best fitness")
    plt.xlabel("Generations")
    plt.ylabel("Fitness")
    plt.grid()
    plt.legend(loc="best")
    if ylog:
        plt.gca().set_yscale('symlog')

    plt.savefig(filename)
    if view:
        plt.show()
    plt.close()
Plots the population s average and best fitness .
26,234
def plot_species(statistics, view=False, filename='speciation.svg'):
    """Visualize speciation (species sizes per generation) as a stack plot.

    :param statistics: a neat StatisticsReporter with recorded generations.
    :param view: show the figure interactively after saving.
    :param filename: path the SVG plot is written to.
    """
    # `plt` is an optional module-level import that may be None.
    if plt is None:
        warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
        return

    species_sizes = statistics.get_species_sizes()
    num_generations = len(species_sizes)
    # Transpose so each row is one species' size over the generations.
    curves = np.array(species_sizes).T

    fig, ax = plt.subplots()
    ax.stackplot(range(num_generations), *curves)

    plt.title("Speciation")
    plt.ylabel("Size per Species")
    plt.xlabel("Generations")

    plt.savefig(filename)
    if view:
        plt.show()
    plt.close()
Visualizes speciation throughout evolution .
26,235
def save_checkpoint(self, config, population, species_set, generation):
    """Persist the full simulation state to a gzip-compressed pickle."""
    filename = '{0}{1}'.format(self.filename_prefix, generation)
    print("Saving checkpoint to {0}".format(filename))

    with gzip.open(filename, 'w', compresslevel=5) as f:
        # Include the RNG state so a restore reproduces the exact run.
        snapshot = (generation, config, population, species_set, random.getstate())
        pickle.dump(snapshot, f, protocol=pickle.HIGHEST_PROTOCOL)
Save the current simulation state .
26,236
def restore_checkpoint(filename):
    """Resume a simulation from a checkpoint written by save_checkpoint."""
    with gzip.open(filename) as f:
        generation, config, population, species_set, rndstate = pickle.load(f)
        # Restore the RNG so the run continues deterministically.
        random.setstate(rndstate)
        return Population(config, (population, species_set, generation))
Resumes the simulation from a previous saved point .
26,237
def host_is_local(hostname, port=22):
    """Return True when *hostname* resolves to this machine, else False."""
    hostname = socket.getfqdn(hostname)
    well_known_local = (
        "localhost",
        "0.0.0.0",
        "127.0.0.1",
        "1.0.0.127.in-addr.arpa",
        "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa",
    )
    if hostname in well_known_local:
        return True

    localhost = socket.gethostname()
    if hostname == localhost:
        return True

    # Fall back to comparing resolved socket addresses on the given port.
    localaddrs = socket.getaddrinfo(localhost, port)
    targetaddrs = socket.getaddrinfo(hostname, port)
    for _family, _socktype, _proto, _canonname, sockaddr in localaddrs:
        for _rfamily, _rsocktype, _rproto, _rcanonname, rsockaddr in targetaddrs:
            if rsockaddr[0] == sockaddr[0]:
                return True
    return False
Returns True if the hostname points to the localhost otherwise False .
26,238
def _determine_mode(addr, mode):
    """Return the effective mode for *addr*.

    MODE_AUTO resolves to MODE_PRIMARY when *addr* points at this host,
    otherwise MODE_SECONDARY; explicit modes pass through unchanged.

    :raises TypeError: if *addr* is neither a tuple nor a bytestring.
    :raises ValueError: for an unknown *mode*.
    """
    if isinstance(addr, tuple):
        host = addr[0]
    elif type(addr) == type(b"binary_string"):
        host = addr
    else:
        raise TypeError("'addr' needs to be a tuple or an bytestring!")

    if mode == MODE_AUTO:
        return MODE_PRIMARY if host_is_local(host) else MODE_SECONDARY
    if mode in (MODE_SECONDARY, MODE_PRIMARY):
        return mode
    raise ValueError("Invalid mode {!r}!".format(mode))
Returns the mode which should be used . If mode is MODE_AUTO this is determined by checking if addr points to the local host . If it does return MODE_PRIMARY else return MODE_SECONDARY . If mode is either MODE_PRIMARY or MODE_SECONDARY return the mode argument . Otherwise a ValueError is raised .
26,239
def chunked(data, chunksize):
    """Split *data* into lists of at most *chunksize* elements each.

    :raises ValueError: if chunksize is below 1 or not an integer.
    """
    if chunksize < 1:
        raise ValueError("Chunksize must be at least 1!")
    if int(chunksize) != chunksize:
        raise ValueError("Chunksize needs to be an integer")

    chunks, current = [], []
    for element in data:
        current.append(element)
        if len(current) >= chunksize:
            chunks.append(current)
            current = []
    # Flush the trailing partial chunk, if any.
    if current:
        chunks.append(current)
    return chunks
Returns a list of chunks containing at most chunksize elements of data .
26,240
def start(self):
    """Start the manager (primary mode) or connect to it (secondary mode)."""
    if self.mode == MODE_PRIMARY:
        self.manager = self._start()
    else:
        self.manager = self._connect()
Starts or connects to the manager .
26,241
def set_secondary_state(self, value):
    """Publish *value* as the secondary state via the manager.

    :raises ValueError: for a value outside the known states.
    :raises RuntimeError: if the manager has not been started yet.
    """
    if value not in (_STATE_RUNNING, _STATE_SHUTDOWN, _STATE_FORCED_SHUTDOWN):
        raise ValueError("State {!r} is invalid - needs to be one of _STATE_RUNNING, _STATE_SHUTDOWN, or _STATE_FORCED_SHUTDOWN".format(value))
    if self.manager is None:
        raise RuntimeError("Manager not started")
    self.manager.set_state(value)
Sets the value for secondary_state .
26,242
def _get_manager_class(self, register_callables=False):
    """Return a new BaseManager subclass with registered methods.

    When register_callables is True the shared objects (queues, state,
    namespace) are created here and served by this side; otherwise only
    the method names are registered so a client can connect to a remote
    manager that serves them.
    """
    class _EvaluatorSyncManager(managers.BaseManager):
        pass

    # These locals are captured by the lambdas below and live as long as
    # the manager class does.
    inqueue = queue.Queue()
    outqueue = queue.Queue()
    namespace = Namespace()
    if register_callables:
        _EvaluatorSyncManager.register(
            "get_inqueue",
            callable=lambda: inqueue,
        )
        _EvaluatorSyncManager.register(
            "get_outqueue",
            callable=lambda: outqueue,
        )
        _EvaluatorSyncManager.register(
            "get_state",
            callable=self._get_secondary_state,
        )
        _EvaluatorSyncManager.register(
            "set_state",
            callable=lambda v: self._secondary_state.set(v),
        )
        _EvaluatorSyncManager.register(
            "get_namespace",
            callable=lambda: namespace,
        )
    else:
        _EvaluatorSyncManager.register(
            "get_inqueue",
        )
        _EvaluatorSyncManager.register(
            "get_outqueue",
        )
        _EvaluatorSyncManager.register(
            "get_state",
        )
        _EvaluatorSyncManager.register(
            "set_state",
        )
        _EvaluatorSyncManager.register(
            "get_namespace",
        )
    return _EvaluatorSyncManager
Returns a new Manager subclass with registered methods . If register_callable is True defines the callable arguments .
26,243
def _connect(self):
    """Connect to an already-running manager and return the proxy."""
    manager_cls = self._get_manager_class(register_callables=False)
    instance = manager_cls(address=self.addr, authkey=self.authkey)
    instance.connect()
    return instance
Connects to the manager .
26,244
def _start(self):
    """Start a new manager serving the shared objects and return it."""
    manager_cls = self._get_manager_class(register_callables=True)
    instance = manager_cls(address=self.addr, authkey=self.authkey)
    instance.start()
    return instance
Starts the manager .
26,245
def _start_primary(self):
    """Start as the primary node and expose the shared objects locally."""
    self.em.start()
    self.em.set_secondary_state(_STATE_RUNNING)
    self._set_shared_instances()
Start as the primary
26,246
def _set_shared_instances ( self ) : self . inqueue = self . em . get_inqueue ( ) self . outqueue = self . em . get_outqueue ( ) self . namespace = self . em . get_namespace ( )
Sets attributes from the shared instances .
26,247
def _reset_em(self):
    """Recreate the extended manager, start it, refresh shared proxies."""
    self.em = _ExtendedManager(self.addr, self.authkey, mode=self.mode, start=False)
    self.em.start()
    self._set_shared_instances()
Resets self . em and the shared instances .
26,248
def _secondary_loop(self, reconnect=False):
    """The worker loop for secondary nodes.

    Pulls task chunks from the shared inqueue, evaluates them (optionally
    on a local process pool), and pushes results to the outqueue. Network
    failures either abort (reconnect=False) or trigger a reconnect.

    :param reconnect: keep reconnecting to the primary on connection loss.
    """
    if self.num_workers > 1:
        pool = multiprocessing.Pool(self.num_workers)
    else:
        pool = None
    should_reconnect = True
    while should_reconnect:
        i = 0
        running = True
        try:
            self._reset_em()
        except (socket.error, EOFError, IOError, OSError, socket.gaierror, TypeError):
            # Primary not reachable yet -- retry the connection.
            continue
        while running:
            i += 1
            # Poll the shared shutdown state once every 5 iterations.
            if i % 5 == 0:
                try:
                    state = self.em.secondary_state
                except (socket.error, EOFError, IOError, OSError, socket.gaierror, TypeError):
                    if not reconnect:
                        raise
                    else:
                        break
                if state == _STATE_FORCED_SHUTDOWN:
                    running = False
                    should_reconnect = False
                elif state == _STATE_SHUTDOWN:
                    running = False
                if not running:
                    continue
            try:
                tasks = self.inqueue.get(block=True, timeout=0.2)
            except queue.Empty:
                continue
            except (socket.error, EOFError, IOError, OSError, socket.gaierror, TypeError):
                break
            except (managers.RemoteError, multiprocessing.ProcessError) as e:
                # Remote-side exceptions arrive wrapped; classify by repr.
                if ('Empty' in repr(e)) or ('TimeoutError' in repr(e)):
                    continue
                if (('EOFError' in repr(e)) or ('PipeError' in repr(e)) or ('AuthenticationError' in repr(e))):
                    break
                raise
            if pool is None:
                # Sequential evaluation in this process.
                res = []
                for genome_id, genome, config in tasks:
                    fitness = self.eval_function(genome, config)
                    res.append((genome_id, fitness))
            else:
                # Parallel evaluation on the local worker pool.
                genome_ids = []
                jobs = []
                for genome_id, genome, config in tasks:
                    genome_ids.append(genome_id)
                    jobs.append(pool.apply_async(self.eval_function, (genome, config)))
                results = [job.get(timeout=self.worker_timeout) for job in jobs]
                res = zip(genome_ids, results)
            try:
                self.outqueue.put(res)
            except (socket.error, EOFError, IOError, OSError, socket.gaierror, TypeError):
                break
            except (managers.RemoteError, multiprocessing.ProcessError) as e:
                if ('Empty' in repr(e)) or ('TimeoutError' in repr(e)):
                    continue
                if (('EOFError' in repr(e)) or ('PipeError' in repr(e)) or ('AuthenticationError' in repr(e))):
                    break
                raise
        if not reconnect:
            should_reconnect = False
            break
    if pool is not None:
        pool.terminate()
The worker loop for the secondary nodes .
26,249
def evaluate(self, genomes, config):
    """Evaluate *genomes* by distributing them to the secondary nodes.

    Splits the genomes into chunks, queues them on the shared inqueue,
    collects fitness results from the outqueue, and writes each fitness
    back onto its genome.

    :raises ModeError: if this evaluator is not in primary mode.
    """
    if self.mode != MODE_PRIMARY:
        raise ModeError("Not in primary mode!")
    tasks = [(genome_id, genome, config) for genome_id, genome in genomes]
    id2genome = {genome_id: genome for genome_id, genome in genomes}
    tasks = chunked(tasks, self.secondary_chunksize)
    n_tasks = len(tasks)
    for task in tasks:
        self.inqueue.put(task)
    tresults = []
    # One result batch is expected per task chunk.
    while len(tresults) < n_tasks:
        try:
            sr = self.outqueue.get(block=True, timeout=0.2)
        except (queue.Empty, managers.RemoteError):
            continue
        tresults.append(sr)
    results = []
    for sr in tresults:
        results += sr
    # Write the computed fitnesses back onto the original genomes.
    for genome_id, fitness in results:
        genome = id2genome[genome_id]
        genome.fitness = fitness
Evaluates the genomes . This method raises a ModeError if the DistributedEvaluator is not in primary mode .
26,250
def reproduce(self, config, species, pop_size, generation):
    """Create a new population from the current species.

    Handles stagnation filtering, fitness adjustment, per-species spawn
    allocation, elitism, and crossover + mutation of offspring.

    :return: dict mapping new genome id -> genome.
    """
    # Filter out stagnated species and collect the fitnesses of the rest.
    all_fitnesses = []
    remaining_species = []
    for stag_sid, stag_s, stagnant in self.stagnation.update(species, generation):
        if stagnant:
            self.reporters.species_stagnant(stag_sid, stag_s)
        else:
            all_fitnesses.extend(m.fitness for m in itervalues(stag_s.members))
            remaining_species.append(stag_s)
    # Every species stagnated: total extinction.
    if not remaining_species:
        species.species = {}
        return {}
    # Normalize fitness into [0, 1]; the max(1.0, ...) guard avoids
    # division by ~zero when all fitnesses are (nearly) equal.
    min_fitness = min(all_fitnesses)
    max_fitness = max(all_fitnesses)
    fitness_range = max(1.0, max_fitness - min_fitness)
    for afs in remaining_species:
        msf = mean([m.fitness for m in itervalues(afs.members)])
        af = (msf - min_fitness) / fitness_range
        afs.adjusted_fitness = af
    adjusted_fitnesses = [s.adjusted_fitness for s in remaining_species]
    avg_adjusted_fitness = mean(adjusted_fitnesses)
    self.reporters.info("Average adjusted fitness: {:.3f}".format(avg_adjusted_fitness))
    # Decide how many offspring each species gets.
    previous_sizes = [len(s.members) for s in remaining_species]
    min_species_size = self.reproduction_config.min_species_size
    min_species_size = max(min_species_size, self.reproduction_config.elitism)
    spawn_amounts = self.compute_spawn(adjusted_fitnesses, previous_sizes, pop_size, min_species_size)
    new_population = {}
    species.species = {}
    for spawn, s in zip(spawn_amounts, remaining_species):
        # Each surviving species keeps at least its elites.
        spawn = max(spawn, self.reproduction_config.elitism)
        assert spawn > 0
        old_members = list(iteritems(s.members))
        s.members = {}
        species.species[s.key] = s
        # Best members first.
        old_members.sort(reverse=True, key=lambda x: x[1].fitness)
        # Carry the elites over unchanged.
        if self.reproduction_config.elitism > 0:
            for i, m in old_members[:self.reproduction_config.elitism]:
                new_population[i] = m
                spawn -= 1
        if spawn <= 0:
            continue
        # Only the top survival_threshold fraction may be parents
        # (always at least two candidates).
        repro_cutoff = int(math.ceil(self.reproduction_config.survival_threshold * len(old_members)))
        repro_cutoff = max(repro_cutoff, 2)
        old_members = old_members[:repro_cutoff]
        # Produce offspring via crossover + mutation.
        while spawn > 0:
            spawn -= 1
            parent1_id, parent1 = random.choice(old_members)
            parent2_id, parent2 = random.choice(old_members)
            gid = next(self.genome_indexer)
            child = config.genome_type(gid)
            child.configure_crossover(parent1, parent2, config.genome_config)
            child.mutate(config.genome_config)
            new_population[gid] = child
            self.ancestors[gid] = (parent1_id, parent2_id)
    return new_population
Handles creation of genomes either from scratch or by sexual or asexual reproduction from parents .
26,251
def run(self, fitness_function, n=None):
    """Run NEAT's genetic algorithm for at most *n* generations.

    With n=None, runs until a solution is found or extinction occurs
    (requires fitness termination to be enabled in the config).

    :param fitness_function: callable(genome_list, config) assigning
        genome.fitness values.
    :return: the best genome seen over the whole run.
    :raises RuntimeError: if n is None but fitness termination is off.
    :raises CompleteExtinctionException: on extinction without reset.
    """
    if self.config.no_fitness_termination and (n is None):
        raise RuntimeError("Cannot have no generational limit with no fitness termination")
    k = 0
    while n is None or k < n:
        k += 1
        self.reporters.start_generation(self.generation)
        # Evaluate the whole population.
        fitness_function(list(iteritems(self.population)), self.config)
        # Track the best genome of this generation and of the run so far.
        best = None
        for g in itervalues(self.population):
            if best is None or g.fitness > best.fitness:
                best = g
        self.reporters.post_evaluate(self.config, self.population, self.species, best)
        if self.best_genome is None or best.fitness > self.best_genome.fitness:
            self.best_genome = best
        # Stop early once the configured fitness threshold is reached.
        if not self.config.no_fitness_termination:
            fv = self.fitness_criterion(g.fitness for g in itervalues(self.population))
            if fv >= self.config.fitness_threshold:
                self.reporters.found_solution(self.config, self.generation, best)
                break
        self.population = self.reproduction.reproduce(self.config, self.species, self.config.pop_size, self.generation)
        # Handle total extinction: reset or abort per configuration.
        if not self.species.species:
            self.reporters.complete_extinction()
            if self.config.reset_on_extinction:
                self.population = self.reproduction.create_new(self.config.genome_type, self.config.genome_config, self.config.pop_size)
            else:
                raise CompleteExtinctionException()
        self.species.speciate(self.config, self.population, self.generation)
        self.reporters.end_generation(self.config, self.population, self.species)
        self.generation += 1
    if self.config.no_fitness_termination:
        self.reporters.found_solution(self.config, self.generation, self.best_genome)
    return self.best_genome
Runs NEAT s genetic algorithm for at most n generations . If n is None run until solution is found or extinction occurs .
26,252
def crossover(self, gene2):
    """Create a new gene, inheriting each attribute at random from a parent."""
    # Genes must describe the same structural element.
    assert self.key == gene2.key

    child = self.__class__(self.key)
    for attribute in self._gene_attributes:
        donor = self if random() > 0.5 else gene2
        setattr(child, attribute.name, getattr(donor, attribute.name))
    return child
Creates a new gene randomly inheriting attributes from its parents .
26,253
def distance(self, other, config):
    """
    Return the genetic distance between this genome and ``other``.

    The distance is the sum of a node-gene term and a connection-gene term;
    each term averages homologous-gene distances plus a disjoint-gene penalty
    (weighted by ``compatibility_disjoint_coefficient``) over the larger
    gene count. Used to compute genome compatibility for speciation.
    """
    # Compute node gene distance component.
    node_distance = 0.0
    if self.nodes or other.nodes:
        disjoint_nodes = 0
        for k2 in iterkeys(other.nodes):
            if k2 not in self.nodes:
                disjoint_nodes += 1
        for k1, n1 in iteritems(self.nodes):
            n2 = other.nodes.get(k1)
            if n2 is None:
                disjoint_nodes += 1
            else:
                # Homologous genes compute their own distance value.
                node_distance += n1.distance(n2, config)
        max_nodes = max(len(self.nodes), len(other.nodes))
        node_distance = (node_distance +
                         config.compatibility_disjoint_coefficient * disjoint_nodes) / max_nodes
    # Compute connection gene differences.
    connection_distance = 0.0
    if self.connections or other.connections:
        disjoint_connections = 0
        for k2 in iterkeys(other.connections):
            if k2 not in self.connections:
                disjoint_connections += 1
        for k1, c1 in iteritems(self.connections):
            c2 = other.connections.get(k1)
            if c2 is None:
                disjoint_connections += 1
            else:
                # Homologous genes compute their own distance value.
                connection_distance += c1.distance(c2, config)
        max_conn = max(len(self.connections), len(other.connections))
        connection_distance = (connection_distance +
                               config.compatibility_disjoint_coefficient * disjoint_connections) / max_conn
    distance = node_distance + connection_distance
    return distance
Returns the genetic distance between this genome and the other . This distance value is used to compute genome compatibility for speciation .
26,254
def advance(self, inputs, advance_time, time_step=None):
    """
    Advance the simulation by ``advance_time`` seconds, assuming the inputs
    are held constant at the given values for the whole interval.

    :param inputs: one value per input node; length must match ``input_nodes``.
    :param advance_time: total simulated time to advance, in seconds.
    :param time_step: integration step; defaults to half the maximum safe step.
    :return: list of output-node values after the interval.
    :raises RuntimeError: if the number of inputs is wrong.
    """
    final_time_seconds = self.time_seconds + advance_time
    # Use half of the maximum allowed time step if none was given.
    if time_step is None:
        time_step = 0.5 * self.get_max_time_step()
    if len(self.input_nodes) != len(inputs):
        raise RuntimeError("Expected {0} inputs, got {1}".format(len(self.input_nodes), len(inputs)))
    while self.time_seconds < final_time_seconds:
        # Clip the last step so we land exactly on final_time_seconds.
        dt = min(time_step, final_time_seconds - self.time_seconds)
        # Double buffering: read node values from the active buffer and
        # write updated values into the other, then swap.
        ivalues = self.values[self.active]
        ovalues = self.values[1 - self.active]
        self.active = 1 - self.active
        # Inputs are written into both buffers so they stay constant.
        for i, v in zip(self.input_nodes, inputs):
            ivalues[i] = v
            ovalues[i] = v
        for node_key, ne in iteritems(self.node_evals):
            node_inputs = [ivalues[i] * w for i, w in ne.links]
            s = ne.aggregation(node_inputs)
            z = ne.activation(ne.bias + ne.response * s)
            # Forward-Euler update of the leaky-integrator node state.
            ovalues[node_key] += dt / ne.time_constant * (-ovalues[node_key] + z)
        self.time_seconds += dt
    ovalues = self.values[1 - self.active]
    return [ovalues[i] for i in self.output_nodes]
Advance the simulation by the given amount of time assuming that inputs are constant at the given values during the simulated time .
26,255
def update(self, species_set, generation):
    """
    Required interface method. Update species fitness history, check for
    species that have not improved in ``max_stagnation`` generations, and
    return a list of (species_id, species, is_stagnant) tuples.

    The ``species_elitism`` highest-fitness species are never marked
    stagnant, so the species count cannot drop below that number due to
    stagnation alone.
    """
    species_data = []
    for sid, s in iteritems(species_set.species):
        if s.fitness_history:
            prev_fitness = max(s.fitness_history)
        else:
            # No history yet: any fitness counts as an improvement.
            prev_fitness = -sys.float_info.max
        s.fitness = self.species_fitness_func(s.get_fitnesses())
        s.fitness_history.append(s.fitness)
        s.adjusted_fitness = None
        if prev_fitness is None or s.fitness > prev_fitness:
            s.last_improved = generation
        species_data.append((sid, s))
    # Sort in ascending fitness order so the fittest species come last.
    species_data.sort(key=lambda x: x[1].fitness)
    result = []
    species_fitnesses = []
    num_non_stagnant = len(species_data)
    for idx, (sid, s) in enumerate(species_data):
        # Override stagnant state if marking this species as stagnant would
        # result in the total number of species dropping below the limit
        # (species are in ascending fitness order, so the least fit are
        # considered first).
        stagnant_time = generation - s.last_improved
        is_stagnant = False
        if num_non_stagnant > self.stagnation_config.species_elitism:
            is_stagnant = stagnant_time >= self.stagnation_config.max_stagnation
        # The species_elitism fittest species are always spared.
        if (len(species_data) - idx) <= self.stagnation_config.species_elitism:
            is_stagnant = False
        if is_stagnant:
            num_non_stagnant -= 1
        result.append((sid, s, is_stagnant))
        species_fitnesses.append(s.fitness)
    return result
Required interface method . Updates species fitness history information checking for ones that have not improved in max_stagnation generations and - unless it would result in the number of species dropping below the configured species_elitism parameter if they were removed in which case the highest - fitness species are spared - returns a list with stagnant species marked for removal .
26,256
def start(self):
    """Spawn the configured number of daemon worker threads (no-op if already running)."""
    if self.working:
        return
    self.working = True
    for idx in range(self.num_workers):
        thread = threading.Thread(
            name="Worker Thread #{i}".format(i=idx),
            target=self._worker,
        )
        thread.daemon = True
        thread.start()
        self.workers.append(thread)
Starts the worker threads
26,257
def stop(self):
    """Signal the worker threads to exit, then block until they all have."""
    self.working = False
    for worker in self.workers:
        worker.join()
    self.workers = []
Stops the worker threads and waits for them to finish
26,258
def _worker ( self ) : while self . working : try : genome_id , genome , config = self . inqueue . get ( block = True , timeout = 0.2 , ) except queue . Empty : continue f = self . eval_function ( genome , config ) self . outqueue . put ( ( genome_id , genome , f ) )
The worker function
26,259
def evaluate(self, genomes, config):
    """Distribute genomes to the worker pool and assign the returned fitnesses."""
    if not self.working:
        self.start()
    # Enqueue one job per genome.
    pending = 0
    for genome_id, genome in genomes:
        pending += 1
        self.inqueue.put((genome_id, genome, config))
    # Assign fitness back to each genome as results come in.
    while pending > 0:
        pending -= 1
        _ignored_id, genome, fitness = self.outqueue.get()
        genome.fitness = fitness
Evaluate the genomes
26,260
def advance(self, dt_msec):
    """
    Advance simulation time by the given time step in milliseconds.

    Membrane potential ``v`` is integrated in two half-steps for numerical
    stability; on overflow the neuron is reset rather than propagating the
    error. A spike is registered when ``v`` exceeds 30.0 mV.
    """
    try:
        # Two half-step updates of the membrane potential.
        self.v += 0.5 * dt_msec * (0.04 * self.v ** 2 + 5 * self.v + 140 - self.u + self.current)
        self.v += 0.5 * dt_msec * (0.04 * self.v ** 2 + 5 * self.v + 140 - self.u + self.current)
        self.u += dt_msec * self.a * (self.b * self.v - self.u)
    except OverflowError:
        # Divergence: reset the neuron instead of raising.
        self.v = self.c
        self.u = self.b * self.v
    self.fired = 0.0
    if self.v > 30.0:
        # Spike threshold reached: fire, then reset v and bump u.
        self.fired = 1.0
        self.v = self.c
        self.u += self.d
Advances simulation time by the given time step in milliseconds .
26,261
def reset(self):
    """Reset the neuron's state: potential, recovery variable, firing flag and current."""
    self.current = self.bias
    self.fired = 0.0
    self.v = self.c
    self.u = self.b * self.v
Resets all state variables .
26,262
def set_inputs(self, inputs):
    """Assign one input voltage per input node; raises RuntimeError on length mismatch."""
    if len(inputs) != len(self.inputs):
        raise RuntimeError(
            "Number of inputs {0:d} does not match number of input nodes {1:d}".format(
                len(inputs), len(self.inputs)
            )
        )
    for node_id, voltage in zip(self.inputs, inputs):
        self.input_values[node_id] = voltage
Assign input voltages .
26,263
def compute_output(t0, t1):
    """Map the output neurons' first-spike times to a value in [0, 1], or -1.0 if either never spiked."""
    if t0 is None or t1 is None:
        # A neuron that never spiked yields the sentinel value.
        return -1.0
    raw = 1.1 - 0.1 * abs(t0 - t1)
    # Clamp the response into [0, 1].
    if raw < 0.0:
        return 0.0
    if raw > 1.0:
        return 1.0
    return raw
Compute the network s output based on the time to first spike of the two output neurons .
26,264
def speciate(self, config, population, generation):
    """
    Place genomes into species by genetic similarity.

    Each existing species first picks a new representative (the genome
    closest to its old one); remaining genomes then join the first species
    whose representative is within ``compatibility_threshold``, or found a
    new species otherwise.
    """
    assert isinstance(population, dict)
    compatibility_threshold = self.species_set_config.compatibility_threshold
    # Find the best representatives for each existing species.
    unspeciated = set(iterkeys(population))
    distances = GenomeDistanceCache(config.genome_config)
    new_representatives = {}
    new_members = {}
    for sid, s in iteritems(self.species):
        candidates = []
        for gid in unspeciated:
            g = population[gid]
            d = distances(s.representative, g)
            candidates.append((d, g))
        # The new representative is the genome closest to the current one.
        ignored_rdist, new_rep = min(candidates, key=lambda x: x[0])
        new_rid = new_rep.key
        new_representatives[sid] = new_rid
        new_members[sid] = [new_rid]
        unspeciated.remove(new_rid)
    # Partition the remaining population into species by similarity.
    while unspeciated:
        gid = unspeciated.pop()
        g = population[gid]
        # Find the species with the most similar representative.
        candidates = []
        for sid, rid in iteritems(new_representatives):
            rep = population[rid]
            d = distances(rep, g)
            if d < compatibility_threshold:
                candidates.append((d, sid))
        if candidates:
            ignored_sdist, sid = min(candidates, key=lambda x: x[0])
            new_members[sid].append(gid)
        else:
            # No species is similar enough: create a new species with this
            # genome as its representative.
            sid = next(self.indexer)
            new_representatives[sid] = gid
            new_members[sid] = [gid]
    # Update the species collection based on the new speciation.
    self.genome_to_species = {}
    for sid, rid in iteritems(new_representatives):
        s = self.species.get(sid)
        if s is None:
            s = Species(sid, generation)
            self.species[sid] = s
        members = new_members[sid]
        for gid in members:
            self.genome_to_species[gid] = sid
        member_dict = dict((gid, population[gid]) for gid in members)
        s.update(population[rid], member_dict)
    # Report distance statistics over all cached pairwise distances.
    gdmean = mean(itervalues(distances.distances))
    gdstdev = stdev(itervalues(distances.distances))
    self.reporters.info(
        'Mean genetic distance {0:.3f}, standard deviation {1:.3f}'.format(gdmean, gdstdev))
Place genomes into species by genetic similarity .
26,265
def get_average_cross_validation_fitness(self):
    """Get the per-generation average cross-validation fitness."""
    # For each generation, flatten all species' score lists and average them.
    return [
        mean([score for scores in stats.values() for score in scores])
        for stats in self.generation_cross_validation_statistics
    ]
Get the per-generation average cross-validation fitness.
26,266
def best_unique_genomes(self, n):
    """Return the n most fit genomes, with no duplication (later entries win on key)."""
    unique = {}
    for genome in self.most_fit_genomes:
        unique[genome.key] = genome
    return sorted(unique.values(), key=lambda g: g.fitness, reverse=True)[:n]
Returns the n most fit genomes, with no duplication.
26,267
def best_genomes(self, n):
    """Return the n most fit genomes ever seen, best first."""
    return sorted(self.most_fit_genomes, key=lambda g: g.fitness, reverse=True)[:n]
Returns the n most fit genomes ever seen .
26,268
def save_genome_fitness(self, delimiter=' ', filename='fitness_history.csv', with_cross_validation=False):
    """Save the population's best and average fitness per generation to a CSV file."""
    with open(filename, 'w') as fh:
        writer = csv.writer(fh, delimiter=delimiter)
        best_fitness = [g.fitness for g in self.most_fit_genomes]
        avg_fitness = self.get_fitness_mean()
        if with_cross_validation:
            # Also include cross-validation best/average columns.
            cv_best = [g.cross_fitness for g in self.most_fit_genomes]
            cv_avg = self.get_average_cross_validation_fitness()
            writer.writerows(zip(best_fitness, avg_fitness, cv_best, cv_avg))
        else:
            writer.writerows(zip(best_fitness, avg_fitness))
Saves the population s best and average fitness .
26,269
def save_species_count(self, delimiter=' ', filename='speciation.csv'):
    """Log species sizes per generation to a CSV file."""
    with open(filename, 'w') as fh:
        csv.writer(fh, delimiter=delimiter).writerows(self.get_species_sizes())
Log speciation throughout evolution .
26,270
def save_species_fitness(self, delimiter=' ', null_value='NA', filename='species_fitness.csv'):
    """Log species average fitness per generation to a CSV file."""
    with open(filename, 'w') as fh:
        csv.writer(fh, delimiter=delimiter).writerows(self.get_species_fitness(null_value))
Log species average fitness throughout evolution .
26,271
def configure_new(self, config):
    """Configure a new genome based on the given configuration."""
    # Create node genes for the output pins.
    for node_key in config.output_keys:
        self.nodes[node_key] = self.create_node(config, node_key)
    # Add hidden nodes if requested.
    if config.num_hidden > 0:
        for i in range(config.num_hidden):
            node_key = config.get_new_node_key(self.nodes)
            assert node_key not in self.nodes
            node = self.create_node(config, node_key)
            self.nodes[node_key] = node
    # Add connections based on the configured initial connectivity type.
    if 'fs_neat' in config.initial_connection:
        if config.initial_connection == 'fs_neat_nohidden':
            self.connect_fs_neat_nohidden(config)
        elif config.initial_connection == 'fs_neat_hidden':
            self.connect_fs_neat_hidden(config)
        else:
            # Plain 'fs_neat': warn that hidden nodes won't be connected.
            if config.num_hidden > 0:
                print("Warning: initial_connection = fs_neat will not connect to hidden nodes;",
                      "\tif this is desired, set initial_connection = fs_neat_nohidden;",
                      "\tif not, set initial_connection = fs_neat_hidden",
                      sep='\n', file=sys.stderr)
            self.connect_fs_neat_nohidden(config)
    elif 'full' in config.initial_connection:
        if config.initial_connection == 'full_nodirect':
            self.connect_full_nodirect(config)
        elif config.initial_connection == 'full_direct':
            self.connect_full_direct(config)
        else:
            # Plain 'full': warn about the lack of direct input-output links.
            if config.num_hidden > 0:
                print("Warning: initial_connection = full with hidden nodes will not do direct input-output connections;",
                      "\tif this is desired, set initial_connection = full_nodirect;",
                      "\tif not, set initial_connection = full_direct",
                      sep='\n', file=sys.stderr)
            self.connect_full_nodirect(config)
    elif 'partial' in config.initial_connection:
        if config.initial_connection == 'partial_nodirect':
            self.connect_partial_nodirect(config)
        elif config.initial_connection == 'partial_direct':
            self.connect_partial_direct(config)
        else:
            # Plain 'partial': warn about the lack of direct input-output links.
            if config.num_hidden > 0:
                print("Warning: initial_connection = partial with hidden nodes will not do direct input-output connections;",
                      "\tif this is desired, set initial_connection = partial_nodirect {0};".format(config.connection_fraction),
                      "\tif not, set initial_connection = partial_direct {0}".format(config.connection_fraction),
                      sep='\n', file=sys.stderr)
            self.connect_partial_nodirect(config)
Configure a new genome based on the given configuration .
26,272
def configure_crossover(self, genome1, genome2, config):
    """
    Configure a new genome by crossover from two parent genomes.

    Genes present only in the fitter parent are copied; homologous genes
    are combined via gene-level crossover. Ties on fitness favor genome2.
    """
    assert isinstance(genome1.fitness, (int, float))
    assert isinstance(genome2.fitness, (int, float))
    # parent1 is always the fitter parent.
    if genome1.fitness > genome2.fitness:
        parent1, parent2 = genome1, genome2
    else:
        parent1, parent2 = genome2, genome1
    # Inherit connection genes.
    for key, cg1 in iteritems(parent1.connections):
        cg2 = parent2.connections.get(key)
        if cg2 is None:
            # Excess or disjoint gene: copy from the fittest parent.
            self.connections[key] = cg1.copy()
        else:
            # Homologous gene: combine genes from both parents.
            self.connections[key] = cg1.crossover(cg2)
    # Inherit node genes.
    parent1_set = parent1.nodes
    parent2_set = parent2.nodes
    for key, ng1 in iteritems(parent1_set):
        ng2 = parent2_set.get(key)
        assert key not in self.nodes
        if ng2 is None:
            # Extra gene: copy from the fittest parent.
            self.nodes[key] = ng1.copy()
        else:
            # Homologous gene: combine genes from both parents.
            self.nodes[key] = ng1.crossover(ng2)
Configure a new genome by crossover from two parent genomes .
26,273
def connect_full_direct(self, config):
    """Create a fully-connected genome, including direct input-output connections."""
    for in_node, out_node in self.compute_full_connections(config, True):
        conn = self.create_connection(config, in_node, out_node)
        self.connections[conn.key] = conn
Create a fully - connected genome including direct input - output connections .
26,274
def interpret(self, config_dict):
    """
    Convert the config_parser output into the proper type, supply defaults
    if available and needed, and check for some errors.

    :param config_dict: mapping of config item names to raw string values.
    :return: the value coerced to this item's ``value_type``.
    :raises RuntimeError: for missing items without a default, bad bool
        strings, failed conversions, or an unexpected ``value_type``.
    """
    value = config_dict.get(self.name)
    if value is None:
        if self.default is None:
            raise RuntimeError('Missing configuration item: ' + self.name)
        else:
            warnings.warn("Using default {!r} for '{!s}'".format(self.default, self.name),
                          DeprecationWarning)
            # If the default already has the target (non-str) type, return it
            # directly; otherwise fall through and coerce it like a raw value.
            if (str != self.value_type) and isinstance(self.default, self.value_type):
                return self.default
            else:
                value = self.default
    try:
        if str == self.value_type:
            return str(value)
        if int == self.value_type:
            return int(value)
        if bool == self.value_type:
            # Only the literal strings "true"/"false" (any case) are accepted.
            if value.lower() == "true":
                return True
            elif value.lower() == "false":
                return False
            else:
                raise RuntimeError(self.name + " must be True or False")
        if float == self.value_type:
            return float(value)
        if list == self.value_type:
            # Lists are encoded as space-separated strings.
            return value.split(" ")
    except Exception:
        raise RuntimeError("Error interpreting config item '{}' with value {!r} and type {}".format(
            self.name, value, self.value_type))
    raise RuntimeError("Unexpected configuration type: " + repr(self.value_type))
Converts the config_parser output into the proper type supplies defaults if available and needed and checks for some errors .
26,275
def plot_spikes(spikes, view=False, filename=None, title=None):
    """
    Plot the trains for a single spiking neuron.

    :param spikes: iterable of (time, current, potential, recovery, fired) tuples.
    :param view: if True, show the figure interactively (and release it).
    :param filename: if given, save the figure to this path.
    :param title: optional suffix for the plot title.
    :return: the matplotlib figure, or None if it was shown and closed.
    """
    t_values = [t for t, I, v, u, f in spikes]
    v_values = [v for t, I, v, u, f in spikes]
    u_values = [u for t, I, v, u, f in spikes]
    I_values = [I for t, I, v, u, f in spikes]
    f_values = [f for t, I, v, u, f in spikes]
    fig = plt.figure()
    # Subplot 1: membrane potential trace.
    plt.subplot(4, 1, 1)
    plt.ylabel("Potential (mv)")
    plt.xlabel("Time (in ms)")
    plt.grid()
    plt.plot(t_values, v_values, "g-")
    if title is None:
        plt.title("Izhikevich's spiking neuron model")
    else:
        plt.title("Izhikevich's spiking neuron model ({0!s})".format(title))
    # Subplot 2: spike train.
    plt.subplot(4, 1, 2)
    plt.ylabel("Fired")
    plt.xlabel("Time (in ms)")
    plt.grid()
    plt.plot(t_values, f_values, "r-")
    # Subplot 3: recovery variable.
    plt.subplot(4, 1, 3)
    plt.ylabel("Recovery (u)")
    plt.xlabel("Time (in ms)")
    plt.grid()
    plt.plot(t_values, u_values, "r-")
    # Subplot 4: input current.
    plt.subplot(4, 1, 4)
    plt.ylabel("Current (I)")
    plt.xlabel("Time (in ms)")
    plt.grid()
    plt.plot(t_values, I_values, "r-o")
    if filename is not None:
        plt.savefig(filename)
    if view:
        plt.show()
        plt.close()
        fig = None
    return fig
Plots the trains for a single spiking neuron .
26,276
def isLoggedIn(self):
    """Check login status: Facebook redirects the login page to /home when logged in."""
    response = self._cleanGet(self.req_url.LOGIN, allow_redirects=False)
    location = response.headers.get("Location")
    return location is not None and "home" in location
Sends a request to Facebook to check the login status
26,277
def setSession(self, session_cookies):
    """Load session cookies into the client; return True on success, False otherwise."""
    # Quick sanity check: a usable session must contain the "c_user" cookie.
    if not session_cookies or "c_user" not in session_cookies:
        return False
    try:
        # Merge the given cookies into the current session and re-run login setup.
        self._session.cookies = requests.cookies.merge_cookies(
            self._session.cookies, session_cookies
        )
        self._postLogin()
    except Exception:
        log.exception("Failed loading session")
        self._resetValues()
        return False
    return True
Loads session cookies
26,278
def logout(self):
    """
    Safely log out the client.

    :return: True if the logout request succeeded.
    """
    if not hasattr(self, "_fb_h"):
        # The logout endpoint needs an "h" token; scrape it from the
        # settings-menu page markup.
        h_r = self._post(self.req_url.MODERN_SETTINGS_MENU, {"pmid": "4"})
        self._fb_h = re.search(r'name=\\"h\\" value=\\"(.*?)\\"', h_r.text).group(1)
    data = {"ref": "mb", "h": self._fb_h}
    r = self._get(self.req_url.LOGOUT, data)
    self._resetValues()
    return r.ok
Safely logs out the client
26,279
def _getThread ( self , given_thread_id = None , given_thread_type = None ) : if given_thread_id is None : if self . _default_thread_id is not None : return self . _default_thread_id , self . _default_thread_type else : raise ValueError ( "Thread ID is not set" ) else : return given_thread_id , given_thread_type
Checks if thread ID is given checks if default is set and returns correct values
26,280
def setDefaultThread(self, thread_id, thread_type):
    """Set the default thread that messages are sent to when none is specified."""
    self._default_thread_id, self._default_thread_type = thread_id, thread_type
Sets default thread to send messages to
26,281
def fetchThreads(self, thread_location, before=None, after=None, limit=None):
    """
    Get all threads in ``thread_location``, sorted from newest to oldest.

    :param thread_location: location to fetch from (e.g. inbox, archived).
    :param before: only include threads with activity before this epoch (ms) timestamp.
    :param after: only include threads with activity after this epoch (ms) timestamp.
    :param limit: maximum number of threads to return; None for no limit.
    :return: list of thread objects.
    """
    threads = []
    last_thread_timestamp = None
    while True:
        # Stop fetching once we have at least `limit` candidates.
        if limit and len(threads) >= limit:
            break
        candidates = self.fetchThreadList(
            before=last_thread_timestamp, thread_location=thread_location
        )
        if len(candidates) > 1:
            # The first candidate overlaps the previous page; skip it.
            threads += candidates[1:]
        else:
            # No new threads returned.
            break
        last_thread_timestamp = threads[-1].last_message_timestamp
        # Stop paging once we have moved past the requested time window.
        if (before is not None and int(last_thread_timestamp) > before) or (
            after is not None and int(last_thread_timestamp) < after
        ):
            break
    if before is not None or after is not None:
        # BUG FIX: the previous implementation removed items from `threads`
        # while iterating it, which skipped the element following each
        # removal; build a filtered list instead.
        kept = []
        for t in threads:
            last_message_timestamp = int(t.last_message_timestamp)
            if (before is not None and last_message_timestamp > before) or (
                after is not None and last_message_timestamp < after
            ):
                continue
            kept.append(t)
        threads = kept
    if limit and len(threads) > limit:
        return threads[:limit]
    return threads
Get all threads in thread_location . Threads will be sorted from newest to oldest .
26,282
def fetchAllUsersFromThreads(self, threads):
    """
    Get all users involved in the given threads.

    User threads are collected directly; participants of group threads that
    are not already known are fetched afterwards in one batch.

    :param threads: threads to collect users from.
    :return: list of user objects (unordered across the two sources).
    """
    users = []
    seen_uids = set()      # uids already collected, for O(1) duplicate checks
    users_to_fetch = []    # unknown participant ids, in first-seen order
    to_fetch_seen = set()  # companion set for O(1) membership on users_to_fetch
    for thread in threads:
        if thread.type == ThreadType.USER:
            if thread.uid not in seen_uids:
                users.append(thread)
                seen_uids.add(thread.uid)
        elif thread.type == ThreadType.GROUP:
            for user_id in thread.participants:
                if user_id not in seen_uids and user_id not in to_fetch_seen:
                    users_to_fetch.append(user_id)
                    to_fetch_seen.add(user_id)
    # Resolve all unknown participants in a single batched request.
    for user_id, user in self.fetchUserInfo(*users_to_fetch).items():
        users.append(user)
    return users
Get all users involved in threads .
26,283
def fetchAllUsers(self):
    """Get all users the client is currently chatting with."""
    j = self._post(
        self.req_url.ALL_USERS,
        query={"viewer": self._uid},
        fix_request=True,
        as_json=True,
    )
    if j.get("payload") is None:
        raise FBchatException("Missing payload while fetching users: {}".format(j))
    result = []
    for entry in j["payload"].values():
        if entry["type"] not in ("user", "friend"):
            continue
        # Skip invalid user entries.
        if entry["id"] in ["0", 0]:
            continue
        result.append(User._from_all_fetch(entry))
    return result
Gets all users the client is currently chatting with
26,284
def searchForPages(self, name, limit=10):
    """Find and get pages by name."""
    j = self.graphql_request(
        GraphQL(query=GraphQL.SEARCH_PAGE, params={"search": name, "limit": limit})
    )
    return [Page._from_graphql(node) for node in j[name]["pages"]["nodes"]]
Find and get page by its name
26,285
def searchForGroups(self, name, limit=10):
    """Find and get group threads by name."""
    j = self.graphql_request(
        GraphQL(query=GraphQL.SEARCH_GROUP, params={"search": name, "limit": limit})
    )
    return [Group._from_graphql(node) for node in j["viewer"]["groups"]["nodes"]]
Find and get group thread by its name
26,286
def searchForThreads(self, name, limit=10):
    """
    Find and get threads by name.

    Results can be users, groups or pages, depending on what the search
    returns; Facebook "Group" community nodes are skipped.
    """
    params = {"search": name, "limit": limit}
    j = self.graphql_request(GraphQL(query=GraphQL.SEARCH_THREAD, params=params))
    rtn = []
    for node in j[name]["threads"]["nodes"]:
        if node["__typename"] == "User":
            rtn.append(User._from_graphql(node))
        elif node["__typename"] == "MessageThread":
            # MessageThread => group chat thread.
            rtn.append(Group._from_graphql(node))
        elif node["__typename"] == "Page":
            rtn.append(Page._from_graphql(node))
        elif node["__typename"] == "Group":
            # Facebook community groups are not chat threads; ignore them.
            pass
        else:
            log.warning("Unknown type {} in {}".format(repr(node["__typename"]), node))
    return rtn
Find and get a thread by its name
26,287
def searchForMessageIDs(self, query, offset=0, limit=5, thread_id=None):
    """
    Find and yield message IDs matching ``query`` in a thread (generator).

    :param query: text to search for.
    :param offset: number of result snippets to skip.
    :param limit: maximum number of snippets to fetch.
    :param thread_id: thread to search in; falls back to the default thread.
    """
    thread_id, thread_type = self._getThread(thread_id, None)
    data = {
        "query": query,
        "snippetOffset": offset,
        "snippetLimit": limit,
        "identifier": "thread_fbid",
        "thread_fbid": thread_id,
    }
    j = self._post(self.req_url.SEARCH_MESSAGES, data, fix_request=True, as_json=True)
    result = j["payload"]["search_snippets"][query]
    # The thread may be absent from the result when there were no matches.
    snippets = result[thread_id]["snippets"] if result.get(thread_id) else []
    for snippet in snippets:
        yield snippet["message_id"]
Find and get message IDs by query
26,288
def search(self, query, fetch_messages=False, thread_limit=5, message_limit=5):
    """Search for messages in all threads; returns {thread_id: results} where
    results are full messages or message IDs depending on ``fetch_messages``."""
    j = self._post(
        self.req_url.SEARCH_MESSAGES,
        {"query": query, "snippetLimit": thread_limit},
        fix_request=True,
        as_json=True,
    )
    result = j["payload"]["search_snippets"][query]
    search_method = self.searchForMessages if fetch_messages else self.searchForMessageIDs
    return {
        thread_id: search_method(query, limit=message_limit, thread_id=thread_id)
        for thread_id in result
    }
Searches for messages in all threads
26,289
def fetchUserInfo(self, *user_ids):
    """Get users' info from IDs, unordered; raises if any thread is not a user."""
    users = {}
    for uid, thread in self.fetchThreadInfo(*user_ids).items():
        if thread.type != ThreadType.USER:
            raise FBchatUserError("Thread {} was not a user".format(thread))
        users[uid] = thread
    return users
Get users info from IDs unordered
26,290
def fetchPageInfo(self, *page_ids):
    """Get pages' info from IDs, unordered; raises if any thread is not a page."""
    pages = {}
    for pid, thread in self.fetchThreadInfo(*page_ids).items():
        if thread.type != ThreadType.PAGE:
            raise FBchatUserError("Thread {} was not a page".format(thread))
        pages[pid] = thread
    return pages
Get pages info from IDs unordered
26,291
def fetchGroupInfo(self, *group_ids):
    """Get groups' info from IDs, unordered; raises if any thread is not a group."""
    groups = {}
    for gid, thread in self.fetchThreadInfo(*group_ids).items():
        if thread.type != ThreadType.GROUP:
            raise FBchatUserError("Thread {} was not a group".format(thread))
        groups[gid] = thread
    return groups
Get groups info from IDs unordered
26,292
def fetchThreadInfo(self, *thread_ids):
    """
    Get threads' info from IDs, unordered.

    Batches one GraphQL query per thread; one-to-one threads additionally
    require a user/page info fetch to determine their concrete type.

    :return: dict mapping thread ID to a Group, User or Page object.
    :raises FBchatException: if a thread cannot be fetched or has an
        unknown type.
    """
    queries = []
    for thread_id in thread_ids:
        params = {
            "id": thread_id,
            "message_limit": 0,
            "load_messages": False,
            "load_read_receipts": False,
            "before": None,
        }
        queries.append(GraphQL(doc_id="2147762685294928", params=params))
    j = self.graphql_requests(*queries)
    for i, entry in enumerate(j):
        if entry.get("message_thread") is None:
            # If there is no existing thread with this person, fall back to
            # treating the requested ID as the other user's ID.
            j[i]["message_thread"] = {
                "thread_key": {"other_user_id": thread_ids[i]},
                "thread_type": "ONE_TO_ONE",
            }
    # One-to-one threads need a second lookup to distinguish users from pages.
    pages_and_user_ids = [
        k["message_thread"]["thread_key"]["other_user_id"]
        for k in j
        if k["message_thread"].get("thread_type") == "ONE_TO_ONE"
    ]
    pages_and_users = {}
    if len(pages_and_user_ids) != 0:
        pages_and_users = self._fetchInfo(*pages_and_user_ids)
    rtn = {}
    for i, entry in enumerate(j):
        entry = entry["message_thread"]
        if entry.get("thread_type") == "GROUP":
            _id = entry["thread_key"]["thread_fbid"]
            rtn[_id] = Group._from_graphql(entry)
        elif entry.get("thread_type") == "ONE_TO_ONE":
            _id = entry["thread_key"]["other_user_id"]
            if pages_and_users.get(_id) is None:
                raise FBchatException("Could not fetch thread {}".format(_id))
            # Merge the user/page info into the thread entry before parsing.
            entry.update(pages_and_users[_id])
            if entry["type"] == ThreadType.USER:
                rtn[_id] = User._from_graphql(entry)
            else:
                rtn[_id] = Page._from_graphql(entry)
        else:
            raise FBchatException("{} had an unknown thread type: {}".format(thread_ids[i], entry))
    return rtn
Get threads info from IDs unordered
26,293
def fetchThreadMessages(self, thread_id=None, limit=20, before=None):
    """
    Get the last messages in a thread, oldest first.

    :param thread_id: thread to fetch from; falls back to the default thread.
    :param limit: maximum number of messages to fetch.
    :param before: only fetch messages before this timestamp.
    :return: list of Message objects, each with ``read_by`` populated from
        the thread's read receipts.
    :raises FBchatException: if the thread could not be fetched.
    """
    thread_id, thread_type = self._getThread(thread_id, None)
    params = {
        "id": thread_id,
        "message_limit": limit,
        "load_messages": True,
        "load_read_receipts": True,
        "before": before,
    }
    j = self.graphql_request(GraphQL(doc_id="1860982147341344", params=params))
    if j.get("message_thread") is None:
        raise FBchatException("Could not fetch thread {}: {}".format(thread_id, j))
    messages = [
        Message._from_graphql(message)
        for message in j["message_thread"]["messages"]["nodes"]
    ]
    # GraphQL returns newest-first; present oldest-first instead.
    messages.reverse()
    read_receipts = j["message_thread"]["read_receipts"]["nodes"]
    # A receipt's watermark covers every message at or before that timestamp.
    for message in messages:
        for receipt in read_receipts:
            if int(receipt["watermark"]) >= int(message.timestamp):
                message.read_by.append(receipt["actor"]["id"])
    return messages
Get the last messages in a thread
26,294
def fetchThreadList(self, offset=None, limit=20, thread_location=ThreadLocation.INBOX, before=None):
    """
    Get the thread list of your Facebook account.

    :param offset: deprecated; ignored (GraphQL pagination uses ``before``).
    :param limit: number of threads to fetch, between 1 and 20.
    :param thread_location: a ThreadLocation value (inbox, archived, ...).
    :param before: only fetch threads with activity before this timestamp.
    :return: list of Group/User thread objects.
    :raises FBchatUserError: on bad ``limit`` or ``thread_location``.
    :raises FBchatException: on an unknown thread type in the response.
    """
    if offset is not None:
        log.warning(
            "Using `offset` in `fetchThreadList` is no longer supported, "
            "since Facebook migrated to the use of GraphQL in this request. "
            "Use `before` instead."
        )
    if limit > 20 or limit < 1:
        raise FBchatUserError("`limit` should be between 1 and 20")
    if thread_location in ThreadLocation:
        loc_str = thread_location.value
    else:
        raise FBchatUserError('"thread_location" must be a value of ThreadLocation')
    params = {
        "limit": limit,
        "tags": [loc_str],
        "before": before,
        "includeDeliveryReceipts": True,
        "includeSeqID": False,
    }
    j = self.graphql_request(GraphQL(doc_id="1349387578499440", params=params))
    rtn = []
    for node in j["viewer"]["message_threads"]["nodes"]:
        _type = node.get("thread_type")
        if _type == "GROUP":
            rtn.append(Group._from_graphql(node))
        elif _type == "ONE_TO_ONE":
            rtn.append(User._from_thread_fetch(node))
        else:
            raise FBchatException("Unknown thread type: {}, with data: {}".format(_type, node))
    return rtn
Get thread list of your facebook account
26,295
def fetchUnread(self):
    """Get the IDs of all unread threads (both group and one-to-one)."""
    form = {
        "folders[0]": "inbox",
        "client": "mercury",
        # Only look for activity within the last minute.
        "last_action_timestamp": now() - 60 * 1000,
    }
    j = self._post(self.req_url.UNREAD_THREADS, form, fix_request=True, as_json=True)
    payload = j["payload"]["unread_thread_fbids"][0]
    return payload["thread_fbids"] + payload["other_user_fbids"]
Get the unread thread list
26,296
def fetchImageUrl(self, image_id):
    """Fetch the URL of the original image for an image attachment ID."""
    data = {"photo_id": str(image_id)}
    j = self._get(ReqUrl.ATTACHMENT_PHOTO, query=data, fix_request=True, as_json=True)
    url = get_jsmods_require(j, 3)
    if url is None:
        raise FBchatException("Could not fetch image url from: {}".format(j))
    return url
Fetches the url to the original image from an image attachment ID
26,297
def _getSendData(self, message=None, thread_id=None, thread_type=ThreadType.USER):
    """
    Return the form data needed to send ``message`` to the given thread
    via the send endpoint.
    """
    # Set up the message envelope.
    messageAndOTID = generateOfflineThreadingID()
    timestamp = now()
    data = {
        "client": "mercury",
        "author": "fbid:{}".format(self._uid),
        "timestamp": timestamp,
        "source": "source:chat:web",
        "offline_threading_id": messageAndOTID,
        "message_id": messageAndOTID,
        "threading_id": generateMessageID(self._client_id),
        # NOTE(review): the trailing colon in this key looks odd but is kept
        # as-is — presumably it matches what the endpoint expects; confirm
        # before changing.
        "ephemeral_ttl_mode:": "0",
    }
    # Set the recipient field according to the thread type.
    if thread_type in [ThreadType.USER, ThreadType.PAGE]:
        data["other_user_fbid"] = thread_id
    elif thread_type == ThreadType.GROUP:
        data["thread_fbid"] = thread_id
    if message is None:
        message = Message()
    if message.text or message.sticker or message.emoji_size:
        data["action_type"] = "ma-type:user-generated-message"
    if message.text:
        data["body"] = message.text
    # Encode @-mentions as indexed profile_xmd fields.
    for i, mention in enumerate(message.mentions):
        data["profile_xmd[{}][id]".format(i)] = mention.thread_id
        data["profile_xmd[{}][offset]".format(i)] = mention.offset
        data["profile_xmd[{}][length]".format(i)] = mention.length
        data["profile_xmd[{}][type]".format(i)] = "p"
    if message.emoji_size:
        # With text, the emoji size is a tag; alone, it is sent as a sticker.
        if message.text:
            data["tags[0]"] = "hot_emoji_size:" + message.emoji_size.name.lower()
        else:
            data["sticker_id"] = message.emoji_size.value
    if message.sticker:
        data["sticker_id"] = message.sticker.uid
    if message.quick_replies:
        xmd = {"quick_replies": []}
        for quick_reply in message.quick_replies:
            q = dict()
            q["content_type"] = quick_reply._type
            q["payload"] = quick_reply.payload
            q["external_payload"] = quick_reply.external_payload
            q["data"] = quick_reply.data
            if quick_reply.is_response:
                q["ignore_for_webhook"] = False
            if isinstance(quick_reply, QuickReplyText):
                q["title"] = quick_reply.title
            if not isinstance(quick_reply, QuickReplyLocation):
                q["image_url"] = quick_reply.image_url
            xmd["quick_replies"].append(q)
        # A single response quick-reply is sent unwrapped, not as a list.
        if len(message.quick_replies) == 1 and message.quick_replies[0].is_response:
            xmd["quick_replies"] = xmd["quick_replies"][0]
        data["platform_xmd"] = json.dumps(xmd)
    if message.reply_to_id:
        data["replied_to_message_id"] = message.reply_to_id
    return data
Returns the data needed to send a request to SendURL
26,298
def _doSendRequest(self, data, get_thread_id=False):
    """
    Post ``data`` to the send endpoint and return the sent message's ID
    (or a (message_id, thread_id) tuple when ``get_thread_id`` is True).

    :raises FBchatException: if no message ID can be found in the response.
    """
    j = self._post(self.req_url.SEND, data, fix_request=True, as_json=True)
    # Update the request token if the response carries a fresh one.
    fb_dtsg = get_jsmods_require(j, 2)
    if fb_dtsg is not None:
        self._payload_default["fb_dtsg"] = fb_dtsg
    try:
        message_ids = [
            (action["message_id"], action["thread_fbid"])
            for action in j["payload"]["actions"]
            if "message_id" in action
        ]
        if len(message_ids) != 1:
            log.warning("Got multiple message ids' back: {}".format(message_ids))
        if get_thread_id:
            return message_ids[0]
        else:
            return message_ids[0][0]
    except (KeyError, IndexError, TypeError) as e:
        raise FBchatException(
            "Error when sending message: "
            "No message IDs could be found: {}".format(j)
        )
Sends the data to SendURL and returns the message ID or None on failure
26,299
def send(self, message, thread_id=None, thread_type=ThreadType.USER):
    """Send a message to a thread; returns the sent message's ID."""
    thread_id, thread_type = self._getThread(thread_id, thread_type)
    payload = self._getSendData(
        message=message, thread_id=thread_id, thread_type=thread_type
    )
    return self._doSendRequest(payload)
Sends a message to a thread