idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
12,900
def return_page(page):
    """Return a rendered template.

    Tries the full psiTurk-style argument set first (hit_id, assignment_id,
    worker_id, mode); if any of those is missing, falls back to rendering
    with only a participant_id; if that is missing too, returns an error
    response naming the page.
    """
    try:
        hit_id = request.args['hit_id']
        assignment_id = request.args['assignment_id']
        worker_id = request.args['worker_id']
        mode = request.args['mode']
        return render_template(
            page,
            hit_id=hit_id,
            assignment_id=assignment_id,
            worker_id=worker_id,
            mode=mode)
    # BUG FIX: was a bare `except:`, which also swallowed template rendering
    # errors; only a missing query argument should trigger the fallback.
    except KeyError:
        try:
            participant_id = request.args['participant_id']
            return render_template(page, participant_id=participant_id)
        except KeyError:
            return error_response(
                error_type="{} args missing".format(page))
Return a rendered template .
154
5
12,901
def quitter():
    """Overide the psiTurk quitter route."""
    exp = experiment(session)
    exp.log("Quitter route was hit.")
    payload = dumps({"status": "success"})
    return Response(payload, status=200, mimetype='application/json')
Override the psiTurk quitter route .
54
10
12,902
def ad_address(mode, hit_id):
    """Get the address of the ad on AWS.

    In "debug" mode this is the local /complete route; in "sandbox"/"live"
    mode the ad id is looked up via the psiTurk API and converted to the
    matching completion URL.

    Raises ValueError if the API server is unreachable, the lookup fails,
    or the mode is unknown.
    """
    if mode == "debug":
        address = '/complete'
    elif mode in ["sandbox", "live"]:
        username = os.getenv('psiturk_access_key_id', config.get("psiTurk Access", "psiturk_access_key_id"))
        password = os.getenv('psiturk_secret_access_id', config.get("psiTurk Access", "psiturk_secret_access_id"))
        try:
            req = requests.get('https://api.psiturk.org/api/ad/lookup/' + hit_id,
                               auth=(username, password))
        # BUG FIX: was a bare `except:`; only network/HTTP-level failures mean
        # the API server could not be reached.
        except requests.exceptions.RequestException:
            raise ValueError('api_server_not_reachable')
        if req.status_code == 200:
            hit_address = req.json()['ad_id']
        else:
            # BUG FIX: the previous message was the placeholder "something here".
            raise ValueError("ad lookup for HIT {} failed with status code {}".format(hit_id, req.status_code))
        if mode == "sandbox":
            address = ('https://sandbox.ad.psiturk.org/complete/' + str(hit_address))
        elif mode == "live":
            address = 'https://ad.psiturk.org/complete/' + str(hit_address)
    else:
        raise ValueError("Unknown mode: {}".format(mode))
    return success_response(field="address",
                            data=address,
                            request_type="ad_address")
Get the address of the ad on AWS .
339
9
12,903
def connect(node_id, other_node_id):
    """Connect to another node.

    Reads the "direction" request parameter (default "to"), verifies both
    nodes exist, creates the vector(s) and notifies the experiment.
    Returns an error response on any failure.
    """
    exp = experiment(session)
    # get the parameters
    direction = request_parameter(parameter="direction", default="to")
    # BUG FIX: this used to read `if type(direction == Response):`, which is
    # the *type of a bool* and therefore always truthy — every request
    # returned the raw parameter instead of connecting the nodes.
    if isinstance(direction, Response):
        return direction
    # check the nodes exist
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/node/connect, node does not exist")
    other_node = models.Node.query.get(other_node_id)
    if other_node is None:
        return error_response(
            error_type="/node/connect, other node does not exist",
            participant=node.participant)
    # execute the request
    try:
        vectors = node.connect(whom=other_node, direction=direction)
        for v in vectors:
            assign_properties(v)
        # ping the experiment
        exp.vector_post_request(node=node, vectors=vectors)
        session.commit()
    # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
    except Exception:
        return error_response(error_type="/vector POST server error",
                              status=403,
                              participant=node.participant)
    return success_response(field="vectors",
                            data=[v.__json__() for v in vectors],
                            request_type="vector post")
Connect to another node .
272
5
12,904
def get_info ( node_id , info_id ) : exp = experiment ( session ) # check the node exists node = models . Node . query . get ( node_id ) if node is None : return error_response ( error_type = "/info, node does not exist" ) # execute the experiment method: info = models . Info . query . get ( info_id ) if info is None : return error_response ( error_type = "/info GET, info does not exist" , participant = node . participant ) elif ( info . origin_id != node . id and info . id not in [ t . info_id for t in node . transmissions ( direction = "incoming" , status = "received" ) ] ) : return error_response ( error_type = "/info GET, forbidden info" , status = 403 , participant = node . participant ) try : # ping the experiment exp . info_get_request ( node = node , infos = info ) session . commit ( ) except : return error_response ( error_type = "/info GET server error" , status = 403 , participant = node . participant ) # return the data return success_response ( field = "info" , data = info . __json__ ( ) , request_type = "info get" )
Get a specific info .
276
5
12,905
def transformation_post(node_id, info_in_id, info_out_id):
    """Transform an info.

    Creates a Transformation (type taken from the "transformation_type"
    request parameter) linking info_in to info_out at the given node, then
    notifies the experiment.
    """
    exp = experiment(session)
    # Get the parameters.
    transformation_type = request_parameter(parameter="transformation_type",
                                            parameter_type="known_class",
                                            default=models.Transformation)
    if type(transformation_type) == Response:
        return transformation_type
    # Check that the node etc. exists.
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/transformation POST, node does not exist")
    info_in = models.Info.query.get(info_in_id)
    if info_in is None:
        return error_response(error_type="/transformation POST, info_in does not exist",
                              participant=node.participant)
    info_out = models.Info.query.get(info_out_id)
    if info_out is None:
        return error_response(error_type="/transformation POST, info_out does not exist",
                              participant=node.participant)
    try:
        # execute the request
        transformation = transformation_type(info_in=info_in, info_out=info_out)
        assign_properties(transformation)
        session.commit()
        # ping the experiment
        exp.transformation_post_request(node=node, transformation=transformation)
        session.commit()
    # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
    except Exception:
        # BUG FIX: error string previously read "/tranaformation POST failed".
        return error_response(error_type="/transformation POST failed",
                              participant=node.participant)
    # return the data
    return success_response(field="transformation",
                            data=transformation.__json__(),
                            request_type="transformation post")
Transform an info .
360
4
12,906
def api_notifications():
    """Receive MTurk REST notifications."""
    event_type = request.values['Event.1.EventType']
    assignment_id = request.values['Event.1.AssignmentId']
    # Hand the notification off to the background queue instead of
    # processing it inside the request.
    db.logger.debug('rq: Queueing %s with id: %s for worker_function',
                    event_type, assignment_id)
    q.enqueue(worker_function, event_type, assignment_id, None)
    queued_ids = ', '.join(q.job_ids)
    db.logger.debug('rq: Submitted Queue Length: %d (%s)', len(q), queued_ids)
    return success_response(request_type="notification")
Receive MTurk REST notifications .
157
8
12,907
def process(source, target, rdfsonly, base=None, logger=logging):
    """Prepare each Versa statement into a triple and add it to the rdflib graph.

    source: Versa model to read links from; target: rdflib graph to add to.
    When rdfsonly is true, only triples whose predicate is in the RDF/RDFS
    namespaces are emitted.
    """
    for link in source.match():
        s, p, o = link[:3]
        # Skip docheader statements
        if s == (base or '') + '@docheader':
            continue
        # Remap Versa resource/property IRIs to their RDF equivalents.
        if p in RESOURCE_MAPPING:
            p = RESOURCE_MAPPING[p]
        if o in RESOURCE_MAPPING:
            o = RESOURCE_MAPPING[o]
        if p == VERSA_BASEIRI + 'refines':
            # 'refines' becomes subClassOf/subPropertyOf depending on the
            # subject's declared type.
            tlinks = list(source.match(s, TYPE_REL))
            if tlinks:
                if tlinks[0][TARGET] == VERSA_BASEIRI + 'Resource':
                    p = I(RDFS_NAMESPACE + 'subClassOf')
                elif tlinks[0][TARGET] == VERSA_BASEIRI + 'Property':
                    p = I(RDFS_NAMESPACE + 'subPropertyOf')
        if p == VERSA_BASEIRI + 'properties':
            # 'properties' inverts into an rdfs:domain triple on the object.
            suri = I(iri.absolutize(s, base)) if base else s
            target.add((URIRef(o), URIRef(RDFS_NAMESPACE + 'domain'), URIRef(suri)))
            continue
        if p == VERSA_BASEIRI + 'value':
            if o not in ['Literal', 'IRI']:
                ouri = I(iri.absolutize(o, base)) if base else o
                target.add((URIRef(s), URIRef(RDFS_NAMESPACE + 'range'), URIRef(ouri)))
                continue
        s = URIRef(s)
        # Translate v:type to rdf:type
        p = RDF.type if p == TYPE_REL else URIRef(p)
        # Versa I objects are IRIs; everything else becomes a literal.
        o = URIRef(o) if isinstance(o, I) else Literal(o)
        if not rdfsonly or p.startswith(RDF_NAMESPACE) or p.startswith(RDFS_NAMESPACE):
            target.add((s, p, o))
    return
Prepare a statement into a triple ready for rdflib graph
508
14
12,908
def write(models, base=None, graph=None, rdfsonly=False, prefixes=None, logger=logging):
    """See the command line help"""
    prefixes = prefixes or {}
    # Reuse the caller's non-empty graph, otherwise start a fresh one.
    g = graph or rdflib.Graph()
    g.bind('v', VNS)
    for prefix, namespace in prefixes.items():
        g.bind(prefix, namespace)
    for model in models:
        process(model, g, rdfsonly, base=model.base, logger=logger)
    return g
See the command line help
150
5
12,909
def routing_feature(app):
    """Add routing feature.

    Allows application routes to be defined in a urls.py file with lazy
    views, and enables regular expressions in route definitions.
    """
    # enable regex routes
    app.url_map.converters['regex'] = RegexConverter
    urls = app.name.rsplit('.', 1)[0] + '.urls.urls'
    # important issue ahead
    # see: https://github.com/projectshift/shift-boiler/issues/11
    try:
        urls = import_string(urls)
    except ImportError as e:
        err = 'Failed to import {}. If it exists, check that it does not '
        err += 'import something non-existent itself! '
        err += 'Try to manually import it to debug.'
        # BUG FIX: chain the original exception so the real cause
        # (e.g. a broken import inside urls.py) is not masked.
        raise ImportError(err.format(urls)) from e
    # add routes now
    for route, route_options in urls.items():
        route_options['rule'] = route
        app.add_url_rule(**route_options)
Add routing feature Allows to define application routes in urls . py file and use lazy views . Additionally enables regular expressions in route definitions
200
26
12,910
def undoable(method):
    """Decorator undoable allows an instance method to be undone."""
    def wrapped(self, *args):
        # Route the call through self.do wrapped in a Command so the
        # holder can record and later reverse it.
        return self.do(Command(self, method, *args))
    return wrapped
Decorator undoable allows an instance method to be undone .
39
13
12,911
def get_template_directory(self):
    """Get path to migrations templates.

    This will get used when you run the db init command.
    """
    # Return directly; the previous version bound the intermediate result
    # to a local named `dir`, shadowing the builtin.
    return os.path.join(os.path.dirname(__file__), 'templates')
Get path to migrations templates This will get used when you run the db init command
37
17
12,912
async def pull_metrics(self, event_fn, loop=None):
    """Method called by core. Should not be overwritten."""
    # NOTE(review): `self.ready` is not called here — presumably a property;
    # if it is a plain method this condition is always truthy. Confirm.
    if self.lazy and not self.ready:
        return None
    logger = self.get_logger()
    ts = timer()
    logger.trace("Waiting for process event")
    result = await self.process(event_fn)
    td = int(timer() - ts)
    # NOTE(review): the message says "ms" but td is timer() units — confirm
    # timer() returns milliseconds.
    logger.trace("It took: {}ms".format(td))
    self._last_run = current_ts()
    return result
Method called by core . Should not be overwritten .
110
11
12,913
def ready(self):
    """Used when the agent is lazy: the agent is processed only once this
    readiness condition is satisfied."""
    logger = self.get_logger()
    now = current_ts()
    interval_ms = self.interval * 1000
    logger.trace("Current time: {0}".format(now))
    logger.trace("Last Run: {0}".format(self._last_run))
    delta = now - self._last_run
    logger.trace("Delta: {0}, Interval: {1}".format(delta, interval_ms))
    # Ready once more than the configured interval has elapsed since the
    # last run (timestamps appear to be milliseconds given the *1000 —
    # confirm current_ts units).
    return delta > interval_ms
Function used when agent is lazy . It is being processed only when ready condition is satisfied
110
17
12,914
def create_jwt(self, expires_in=None):
    """Create a secure timed JWT token that can be passed around.

    It stores the user id, which is later used to retrieve the user data.
    """
    ttl = expires_in or get_jwt_ttl()
    payload = {"id": self.user.id}
    return utils.sign_jwt(
        data=payload,
        secret_key=get_jwt_secret(),
        salt=get_jwt_salt(),
        expires_in=ttl)
Create a secure timed JWT token that can be passed . It save the user id which later will be used to retrieve the data
84
26
12,915
def sendgmail(self, subject, recipients, plaintext, htmltext=None, cc=None, debug=False, useMIMEMultipart=True, gmail_account='kortemmelab@gmail.com', pw_filepath=None):
    """Send an email to each recipient through the given gmail account.

    For this function to work, the password for the gmail user must be
    colocated with this file (in a file named 'pw') or passed in via
    pw_filepath.
    """
    smtpserver = smtplib.SMTP("smtp.gmail.com", 587)
    smtpserver.ehlo()
    smtpserver.starttls()
    # BUG FIX: this second EHLO was previously written as `smtpserver.ehlo`
    # without parentheses — a no-op attribute access.
    smtpserver.ehlo()
    # BUG FIX: gmail_account was unconditionally overwritten with the
    # hard-coded default here, making the parameter useless.
    if pw_filepath:
        smtpserver.login(gmail_account, read_file(pw_filepath))
    else:
        smtpserver.login(gmail_account, read_file('pw'))
    for recipient in recipients:
        if htmltext:
            msg = MIMEText(htmltext, 'html')
            msg['From'] = gmail_account
            msg['To'] = recipient
            msg['Subject'] = subject
            smtpserver.sendmail(gmail_account, recipient, msg.as_string())
        else:
            # Plain-text path builds the RFC 822 message by hand.
            header = 'To:' + recipient + '\n' + 'From: ' + gmail_account + '\n' + 'Subject:' + subject + '\n'
            msg = header + '\n ' + plaintext + '\n\n'
            smtpserver.sendmail(gmail_account, recipient, msg)
    smtpserver.close()
For this function to work the password for the gmail user must be colocated with this file or passed in .
339
23
12,916
def show_one(request, post_process_fun, object_class, id, template='common_json.html'):
    """Return object of the given type with the specified identifier."""
    instance = get_object_or_404(object_class, pk=id)
    payload = post_process_fun(request, instance)
    return render_json(request, payload, template=template,
                       help_text=show_one.__doc__)
Return object of the given type with the specified identifier .
86
11
12,917
def show_more(request, post_process_fun, get_fun, object_class, should_cache=True, template='common_json.html', to_json_kwargs=None):
    """Return list of objects of the given type."""
    # json_orderby and 'all' require the cached/full JSON, so both are
    # rejected up front when caching is disabled for this object type.
    if not should_cache and 'json_orderby' in request.GET:
        return render_json(request, {'error': "Can't order the result according to the JSON field, because the caching for this type of object is turned off. See the documentation."}, template='questions_json.html', help_text=show_more.__doc__, status=501)
    if not should_cache and 'all' in request.GET:
        return render_json(request, {'error': "Can't get all objects, because the caching for this type of object is turned off. See the documentation."}, template='questions_json.html', help_text=show_more.__doc__, status=501)
    if to_json_kwargs is None:
        to_json_kwargs = {}
    time_start = time_lib()
    # Page size is clamped to 100.
    limit = min(int(request.GET.get('limit', 10)), 100)
    page = int(request.GET.get('page', 0))
    try:
        objs = get_fun(request, object_class)
        if 'db_orderby' in request.GET:
            objs = objs.order_by(('-' if 'desc' in request.GET else '') + request.GET['db_orderby'].strip('/'))
        # Paginate at the DB level unless the caller wants everything or
        # will re-sort (and re-slice) on a JSON field below.
        if 'all' not in request.GET and 'json_orderby' not in request.GET:
            objs = objs[page * limit:(page + 1) * limit]
        # Cache key is derived from the SQL text plus serialization kwargs.
        cache_key = 'proso_common_sql_json_%s' % hashlib.sha1((str(objs.query) + str(to_json_kwargs)).encode()).hexdigest()
        cached = cache.get(cache_key)
        if should_cache and cached:
            list_objs = json_lib.loads(cached)
        else:
            list_objs = [x.to_json(**to_json_kwargs) for x in list(objs)]
        if should_cache:
            # 30-day TTL.
            cache.set(cache_key, json_lib.dumps(list_objs), 60 * 60 * 24 * 30)
        LOGGER.debug('loading objects in show_more view took %s seconds', (time_lib() - time_start))
        json = post_process_fun(request, list_objs)
        if 'json_orderby' in request.GET:
            time_before_json_sort = time_lib()
            json.sort(key=lambda x: (-1 if 'desc' in request.GET else 1) * x[request.GET['json_orderby']])
            if 'all' not in request.GET:
                json = json[page * limit:(page + 1) * limit]
            LOGGER.debug('sorting objects according to JSON field took %s seconds', (time_lib() - time_before_json_sort))
        return render_json(request, json, template=template, help_text=show_more.__doc__)
    except EmptyResultSet:
        # An empty queryset is a valid, empty result.
        return render_json(request, [], template=template, help_text=show_more.__doc__)
Return list of objects of the given type .
765
9
12,918
def log(request):
    """Log an event from the client to the server."""
    if request.method == "POST":
        log_dict = json_body(request.body.decode("utf-8"))
        if 'message' not in log_dict:
            return HttpResponseBadRequest('There is no message to log!')
        # Map client-supplied level names to logger methods; default info.
        levels = {'debug': JAVASCRIPT_LOGGER.debug, 'info': JAVASCRIPT_LOGGER.info, 'warn': JAVASCRIPT_LOGGER.warn, 'error': JAVASCRIPT_LOGGER.error, }
        log_fun = JAVASCRIPT_LOGGER.info
        if 'level' in log_dict:
            log_fun = levels[log_dict['level']]
        # NOTE(review): is_authenticated() is a method call here (old Django);
        # in Django >= 1.10 it is a property — confirm target version.
        log_fun(log_dict['message'], extra={'request': request, 'user': request.user.id if request.user.is_authenticated() else None, 'client_data': json_lib.dumps(log_dict.get('data', {})), })
        return HttpResponse('ok', status=201)
    else:
        # GET: render the self-documenting help page.
        return render_json(request, {}, template='common_log_service.html', help_text=log.__doc__)
Log an event from the client to the server .
285
10
12,919
def custom_config(request):
    """Save user - specific configuration property."""
    if request.method != 'POST':
        # GET: render the self-documenting help page.
        return render_json(request, {}, template='common_custom_config.html',
                           help_text=custom_config.__doc__)
    config_dict = json_body(request.body.decode('utf-8'))
    condition_key = config_dict.get('condition_key') if config_dict.get('condition_key') else None
    raw_condition_value = config_dict.get('condition_value')
    condition_value = urllib.parse.unquote(raw_condition_value) if raw_condition_value else None
    CustomConfig.objects.try_create(
        config_dict['app_name'],
        config_dict['key'],
        config_dict['value'],
        request.user.id,
        condition_key,
        condition_value)
    return config(request)
Save user - specific configuration property .
189
7
12,920
def languages(request):
    """Returns languages that are available in the system."""
    if hasattr(settings, 'LANGUAGE_DOMAINS'):
        payload = settings.LANGUAGE_DOMAINS
    else:
        payload = {"error": "Languages are not set. (Set LANGUAGE_DOMAINS in settings.py)"}
    return render_json(request, payload, template='common_json.html',
                       help_text=languages.__doc__)
Returns languages that are available in the system .
90
9
12,921
def channel_to_id(slack, channel):
    """Resolve a channel or group name to its Slack id.

    Surely there's a better way to do this...
    """
    channels = slack.api_call('channels.list').get('channels') or []
    groups = slack.api_call('groups.list').get('groups') or []
    if not channels and not groups:
        raise RuntimeError("Couldn't get channels and groups.")
    matches = [entry['id'] for entry in channels + groups
               if entry['name'] == channel]
    if not matches:
        raise ValueError(f"Couldn't find #{channel}")
    return matches[0]
Surely there s a better way to do this ...
135
11
12,922
def send_message(slack):
    """Prompt for and send a message to a channel."""
    target = input('Which channel would you like to message? ')
    text = input('What should the message be? ')
    target_id = channel_to_id(slack, target)
    print(f"Sending message to #{target} (id: {target_id})!")
    slack.rtm_send_message(target_id, text)
Prompt for and send a message to a channel .
87
11
12,923
def parse_device ( lines ) : name , status_line , device = parse_device_header ( lines . pop ( 0 ) ) # There are edge cases when the device list is empty and the status line is # merged with the header line, in those cases, the status line is returned # from parse_device_header(), the rest of the time, it's the next line. if not status_line : status_line = lines . pop ( 0 ) status = parse_device_status ( status_line , device [ "personality" ] ) bitmap = None resync = None for line in lines : if line . startswith ( " bitmap:" ) : bitmap = parse_device_bitmap ( line ) elif line . startswith ( " [" ) : resync = parse_device_resync_progress ( line ) elif line . startswith ( " \tresync=" ) : resync = parse_device_resync_standby ( line ) else : raise NotImplementedError ( "unknown device line: {0}" . format ( line ) ) device . update ( { "status" : status , "bitmap" : bitmap , "resync" : resync , } ) return ( name , device )
Parse all the lines of a device block .
270
10
12,924
def match_etag(etag, header, weak=False):
    """Try to match an ETag against a header value."""
    if etag is None:
        return False
    m = etag_re.match(etag)
    if not m:
        raise ValueError("Not a well-formed ETag: '%s'" % etag)
    is_weak, etag = m.groups()
    parsed_header = parse_etag_header(header)
    # A wildcard header matches any ETag.
    if parsed_header == '*':
        return True
    # A weak ETag only matches when weak comparison was requested.
    if is_weak and not weak:
        return False
    if weak:
        candidates = [entry[1] for entry in parsed_header]
    else:
        # Strong comparison: skip weak tags in the header.
        candidates = [entry[1] for entry in parsed_header if not entry[0]]
    return etag in candidates
Try to match an ETag against a header value .
152
11
12,925
def datetime_to_httpdate(dt):
    """Convert datetime.datetime or Unix timestamp to HTTP date."""
    if isinstance(dt, datetime):
        return format_date_time(datetime_to_timestamp(dt))
    if isinstance(dt, (int, float)):
        return format_date_time(dt)
    raise TypeError("expected datetime.datetime or timestamp (int/float),"
                    " got '%s'" % dt)
Convert datetime . datetime or Unix timestamp to HTTP date .
98
14
12,926
def timedelta_to_httpdate(td):
    """Convert datetime.timedelta or number of seconds to HTTP date.

    The delta is taken relative to the current time.
    """
    if isinstance(td, (int, float)):
        return format_date_time(time.time() + td)
    elif isinstance(td, timedelta):
        return format_date_time(time.time() + total_seconds(td))
    else:
        # BUG FIX: the two string fragments previously concatenated to
        # "...number of seconds(int/float)..." — missing a space.
        raise TypeError("expected datetime.timedelta or number of seconds "
                        "(int/float), got '%s'" % td)
Convert datetime . timedelta or number of seconds to HTTP date .
103
15
12,927
def cache_control(max_age=None, private=False, public=False, s_maxage=None, must_revalidate=False, proxy_revalidate=False, no_cache=False, no_store=False):
    """Generate the value for a Cache-Control header."""
    if private and public:
        raise ValueError("'private' and 'public' are mutually exclusive")
    # Accept timedeltas for the age arguments and normalize to whole seconds.
    if isinstance(max_age, timedelta):
        max_age = int(total_seconds(max_age))
    if isinstance(s_maxage, timedelta):
        s_maxage = int(total_seconds(s_maxage))
    directives = []
    if public:
        directives.append('public')
    if private:
        directives.append('private')
    if max_age is not None:
        directives.append('max-age=%d' % max_age)
    if s_maxage is not None:
        directives.append('s-maxage=%d' % s_maxage)
    # Boolean directives, in the header's canonical order.
    for enabled, token in ((no_cache, 'no-cache'),
                           (no_store, 'no-store'),
                           (must_revalidate, 'must-revalidate'),
                           (proxy_revalidate, 'proxy-revalidate')):
        if enabled:
            directives.append(token)
    return ', '.join(directives)
Generate the value for a Cache - Control header .
293
11
12,928
def get_incidents(self) -> Union[list, bool]:
    """Get today's incidents.

    Pages through the API starting at self.url and collects incidents whose
    publication date is today. Returns False when rate limited.
    """
    brotts_entries_left = True
    incidents_today = []
    url = self.url
    while brotts_entries_left:
        requests_response = requests.get(url, params=self.parameters)
        rate_limited = requests_response.headers.get('x-ratelimit-reset')
        if rate_limited:
            print("You have been rate limited until " + time.strftime('%Y-%m-%d %H:%M:%S%z', time.localtime(rate_limited)))
            return False
        requests_response = requests_response.json()
        incidents = requests_response.get("data")
        if not incidents:
            break
        # Today's date in the same y/m/d form used for the incidents below.
        datetime_today = datetime.date.today()
        datetime_today_as_time = time.strptime(str(datetime_today), "%Y-%m-%d")
        today_date_ymd = self._get_datetime_as_ymd(datetime_today_as_time)
        for incident in incidents:
            incident_pubdate = incident["pubdate_iso8601"]
            incident_date = time.strptime(incident_pubdate, "%Y-%m-%dT%H:%M:%S%z")
            incident_date_ymd = self._get_datetime_as_ymd(incident_date)
            if today_date_ymd == incident_date_ymd:
                incidents_today.append(incident)
            else:
                # Results appear to be newest-first: the first older incident
                # ends the scan entirely.
                brotts_entries_left = False
                break
        # Follow pagination when the API provides a next page.
        if requests_response.get("links"):
            url = requests_response["links"]["next_page_url"]
        else:
            break
    return incidents_today
Get today s incidents .
397
5
12,929
def from_template(args):
    """Create a new oct project from existing template"""
    project_name = args.name
    with tarfile.open(args.template) as tar:
        names = tar.getnames()
        prefix = os.path.commonprefix(names)
        # Validate the archive layout before extracting anything.
        check_template(names, prefix)
        tar.extractall(project_name, members=get_members(tar, prefix))
Create a new oct project from existing template
80
8
12,930
def from_oct(args):
    """Create a new oct project.

    Builds the directory skeleton, copies the bundled report assets, and
    writes the rendered config and starter script.
    """
    project_name = args.name
    # Render the default config and starter user script from templates.
    env = Environment(loader=PackageLoader('oct.utilities', 'templates'))
    config_content = env.get_template('configuration/config.json').render(script_name='v_user.py')
    script_content = env.get_template('scripts/v_user.j2').render()
    try:
        os.makedirs(project_name)
        os.makedirs(os.path.join(project_name, 'test_scripts'))
        os.makedirs(os.path.join(project_name, 'templates'))
        os.makedirs(os.path.join(project_name, 'templates', 'img'))
        # Copy the static report assets shipped with the package.
        shutil.copytree(os.path.join(BASE_DIR, 'templates', 'css'), os.path.join(project_name, 'templates', 'css'))
        shutil.copytree(os.path.join(BASE_DIR, 'templates', 'javascript'), os.path.join(project_name, 'templates', 'scripts'))
        shutil.copytree(os.path.join(BASE_DIR, 'templates', 'fonts'), os.path.join(project_name, 'templates', 'fonts'))
        shutil.copy(os.path.join(BASE_DIR, 'templates', 'html', 'report.html'), os.path.join(project_name, 'templates'))
    except OSError:
        # Most likely the project directory already exists.
        print('ERROR: can not create directory for %r' % project_name, file=sys.stderr)
        raise
    with open(os.path.join(project_name, 'config.json'), 'w') as f:
        f.write(config_content)
    with open(os.path.join(project_name, 'test_scripts', 'v_user.py'), 'w') as f:
        f.write(script_content)
Create a new oct project
477
5
12,931
def as_data_frame(self) -> pandas.DataFrame:
    """Create a pandas DataFrame representation of the resource table."""
    genes = self.influence_graph.genes
    # Three column headers per gene: the gene itself, its active
    # multiplexes, and its transition value.
    gene_headers = {gene: repr(gene) for gene in genes}
    multiplex_headers = {gene: f"active multiplex on {gene!r}" for gene in genes}
    transition_headers = {gene: f"K_{gene!r}" for gene in genes}
    columns = defaultdict(list)
    for state in self.table.keys():
        for gene in genes:
            columns[gene_headers[gene]].append(state[gene])
            columns[multiplex_headers[gene]].append(self._repr_multiplexes(gene, state))
            columns[transition_headers[gene]].append(self._repr_transition(gene, state))
    header = (list(gene_headers.values())
              + list(multiplex_headers.values())
              + list(transition_headers.values()))
    return pandas.DataFrame(columns, columns=header)
Create a panda DataFrame representation of the resource table .
252
12
12,932
def create(self, r, r_, R=200):
    """Create new spirograph image with given arguments.

    Returned image is scaled to the agent's preferred image size and
    normalized to [0, 1] floats (white background, black curve).
    """
    x, y = give_dots(R, r, r_, spins=20)
    xy = np.array([x, y]).T
    xy = np.array(np.around(xy), dtype=np.int64)
    # Keep only points inside the 500x500 canvas centered at the origin.
    xy = xy[(xy[:, 0] >= -250) & (xy[:, 1] >= -250) & (xy[:, 0] < 250) & (xy[:, 1] < 250)]
    xy = xy + 250
    img = np.ones([500, 500], dtype=np.uint8)
    img[:] = 255
    img[xy[:, 0], xy[:, 1]] = 0
    # NOTE(review): scipy.misc.imresize is removed in modern SciPy —
    # consider PIL/skimage if the environment is upgraded.
    img = misc.imresize(img, [self.img_size, self.img_size])
    fimg = img / 255.0
    return fimg
Create new spirograph image with given arguments . Returned image is scaled to agent s preferred image size .
217
23
12,933
def hedonic_value(self, novelty):
    """Given the agent's desired novelty, how good the novelty value is.

    Normalized so the desired novelty itself scores 1.0.
    """
    peak = gaus_pdf(self.desired_novelty, self.desired_novelty, 4)
    score = gaus_pdf(novelty, self.desired_novelty, 4)
    return score / peak
Given the agent s desired novelty how good the novelty value is .
62
13
12,934
def evaluate(self, artifact):
    """Evaluate the artifact with respect to the agent's short term memory."""
    if self.desired_novelty > 0:
        # NOTE(review): this branch returns a bare value while the branch
        # below returns a (value, framing) tuple — confirm callers handle
        # both shapes.
        return self.hedonic_value(self.novelty(artifact.obj))
    return self.novelty(artifact.obj) / self.img_size, None
Evaluate the artifact with respect to the agents short term memory .
54
14
12,935
def learn(self, spiro, iterations=1):
    """Train short term memory with given spirograph."""
    for _ in range(iterations):
        # Re-flatten each cycle in case training mutates its input.
        self.stmem.train_cycle(spiro.obj.flatten())
Train short term memory with given spirograph .
40
11
12,936
def plot_places(self):
    """Plot places where the agent has been and generated a spirograph.

    Saves the plot (and the raw coordinates) to the logger's folder when a
    logger is configured, otherwise shows the plot interactively.
    """
    from matplotlib import pyplot as plt
    fig, ax = plt.subplots()
    x = []
    y = []
    # BUG FIX: xs/ys were previously bound only inside the branch below,
    # raising NameError in the save path when arg_history was short.
    xs = []
    ys = []
    if len(self.arg_history) > 1:
        for p in self.arg_history:
            xs.append(p[0])
            ys.append(p[1])
        ax.plot(xs, ys, color=(0.0, 0.0, 1.0, 0.1))
    # Mark the argument pairs of artifacts that passed self-criticism.
    for a in self.A:
        if a.self_criticism == 'pass':
            args = a.framings[a.creator]['args']
            x.append(args[0])
            y.append(args[1])
    sc = ax.scatter(x, y, marker="x", color='red')
    ax.set_xlim([-200, 200])
    ax.set_ylim([-200, 200])
    agent_vars = "{}_{}_{}{}_last={}_stmem=list{}_veto={}_sc={}_jump={}_sw={}_mr={}_maxN".format(
        self.name, self.age, self.env_learning_method, self.env_learning_amount,
        self.env_learn_on_add, self.stmem.length, self._novelty_threshold,
        self._own_threshold, self.jump, self.search_width, self.move_radius)
    if self.logger is not None:
        imname = os.path.join(self.logger.folder, '{}.png'.format(agent_vars))
        plt.savefig(imname)
        plt.close()
        # Dump the raw coordinates alongside the image.
        fname = os.path.join(self.logger.folder, '{}.txt'.format(agent_vars))
        with open(fname, "w") as f:
            f.write(" ".join([str(e) for e in xs]))
            f.write("\n")
            f.write(" ".join([str(e) for e in ys]))
            f.write("\n")
            f.write(" ".join([str(e) for e in x]))
            f.write("\n")
            f.write(" ".join([str(e) for e in y]))
            f.write("\n")
    else:
        plt.show()
Plot places where the agent has been and generated a spirograph .
568
15
12,937
def destroy(self, folder=None):
    """Destroy the environment and the subprocesses.

    Returns a list containing the result of save_info.
    """
    # Placeholder means: three (0, 0, 0) tuples passed to save_info.
    ameans = [(0, 0, 0) for _ in range(3)]
    ret = [self.save_info(folder, ameans)]
    # Stop the slave processes before tearing down the pool.
    aiomas.run(until=self.stop_slaves(folder))
    # Close and join the process pool nicely.
    self._pool.close()
    self._pool.terminate()
    self._pool.join()
    self._env.shutdown()
    return ret
Destroy the environment and the subprocesses .
107
9
12,938
def add(self, r):
    """Takes a Residue r, derives its id, and adds both to the Sequence."""
    id = r.get_residue_id()
    if self.order:
        last_id = self.order[-1]
        # KAB - allow for multiresidue noncanonicals
        if id in self.order:
            # NOTE(review): the raise below makes the remapping lines after
            # it unreachable — they look like the original "allow for
            # multiresidue noncanonicals" path that is currently disabled.
            raise colortext.Exception('Warning: using code to "allow for multiresidue noncanonicals" - check this case manually.')
            id = '%s.%d' % (str(id), self.special_insertion_count)
            self.special_insertion_count += 1
        # New residue must match the chain and type of the previous one.
        assert (r.Chain == self.sequence[last_id].Chain)
        assert (r.residue_type == self.sequence[last_id].residue_type)
    self.order.append(id)
    self.sequence[id] = r
Takes an id and a Residue r and adds them to the Sequence .
177
17
12,939
def set_type(self, sequence_type):
    """Set the type of a Sequence if it has not been set."""
    if not (self.sequence_type):
        # NOTE: iteritems is Python 2 only.
        for id, r in self.sequence.iteritems():
            # Residues must not already carry a type.
            assert (r.residue_type == None)
            r.residue_type = sequence_type
        self.sequence_type = sequence_type
Set the type of a Sequence if it has not been set .
64
13
12,940
def from_sequence(chain, list_of_residues, sequence_type=None):
    """Takes in a chain identifier and protein sequence and returns a
    Sequence object of Residues indexed from 1."""
    s = Sequence(sequence_type)
    for index, residue_aa in enumerate(list_of_residues, start=1):
        s.add(Residue(chain, index, residue_aa, sequence_type))
    return s
Takes in a chain identifier and protein sequence and returns a Sequence object of Residues indexed from 1 .
74
22
12,941
def substitution_scores_match(self, other):
    """Check to make sure that the substitution scores agree.

    If one map has a null score and the other has a non-null score, we trust
    the non-null score (null entries never cause a mismatch).
    """
    shared_keys = set(self.substitution_scores.keys()) & set(other.substitution_scores.keys())
    for key in shared_keys:
        mine = self.substitution_scores[key]
        theirs = other.substitution_scores[key]
        if mine is None or theirs is None:
            continue
        if mine != theirs:
            return False
    return True
Check to make sure that the substitution scores agree . If one map has a null score and the other has a non - null score we trust the other s score and vice versa .
103
36
12,942
def merge(self, other):
    """Merge two element counters.

    For all elements we take the max count from both counters; elements only
    present in the other counter are copied over. Mutates self.items.
    """
    our_element_frequencies = self.items
    their_element_frequencies = other.items
    # NOTE: iteritems is Python 2 only. Iterating sorted() gives a snapshot,
    # so updating the dict inside the loop is safe.
    for element_name, freq in sorted(our_element_frequencies.iteritems()):
        our_element_frequencies[element_name] = max(our_element_frequencies.get(element_name, 0), their_element_frequencies.get(element_name, 0))
    for element_name, freq in sorted(their_element_frequencies.iteritems()):
        if element_name not in our_element_frequencies:
            our_element_frequencies[element_name] = their_element_frequencies[element_name]
Merge two element counters . For all elements we take the max count from both counters .
166
18
12,943
def dump(self, obj, fp):
    """Serializes obj as an avro-format byte stream to the provided fp
    file-like object stream."""
    is_valid = validate(obj, self._raw_schema)
    if not is_valid:
        raise AvroTypeException(self._avro_schema, obj)
    fastavro_write_data(fp, obj, self._raw_schema)
Serializes obj as an avro - format byte stream to the provided fp file - like object stream .
64
22
12,944
def dumps(self, obj):
    """Serializes obj to an avro-format byte array and returns it."""
    buffer = BytesIO()
    try:
        self.dump(obj, buffer)
        return buffer.getvalue()
    finally:
        buffer.close()
Serializes obj to an avro - format byte array and returns it .
39
15
12,945
def loads(self, data):
    """Deserializes the given byte array into an object and returns it."""
    stream = BytesIO(data)
    try:
        return self.load(stream)
    finally:
        stream.close()
Deserializes the given byte array into an object and returns it .
32
14
12,946
def create(cls, parent, child, relation_type, index=None):
    """Create a PID relation for given parent and child.

    Raises a generic Exception when the relation already exists
    (IntegrityError from the unique constraint).
    """
    try:
        # Nested transaction so a duplicate insert rolls back cleanly.
        with db.session.begin_nested():
            obj = cls(parent_id=parent.id,
                      child_id=child.id,
                      relation_type=relation_type,
                      index=index)
            db.session.add(obj)
    except IntegrityError:
        raise Exception("PID Relation already exists.")
    return obj
Create a PID relation for given parent and child .
146
10
12,947
def relation_exists ( self , parent , child , relation_type ) : return PIDRelation . query . filter_by ( child_pid_id = child . id , parent_pid_id = parent . id , relation_type = relation_type ) . count ( ) > 0
Determine if given relation already exists .
62
9
12,948
def df ( unit = 'GB' ) : details = { } headers = [ 'Filesystem' , 'Type' , 'Size' , 'Used' , 'Available' , 'Capacity' , 'MountedOn' ] n = len ( headers ) unit = df_conversions [ unit ] p = subprocess . Popen ( args = [ 'df' , '-TP' ] , stdout = subprocess . PIPE ) # -P prevents line wrapping on long filesystem names stdout , stderr = p . communicate ( ) lines = stdout . split ( "\n" ) lines [ 0 ] = lines [ 0 ] . replace ( "Mounted on" , "MountedOn" ) . replace ( "1K-blocks" , "Size" ) . replace ( "1024-blocks" , "Size" ) assert ( lines [ 0 ] . split ( ) == headers ) lines = [ l . strip ( ) for l in lines if l . strip ( ) ] for line in lines [ 1 : ] : tokens = line . split ( ) if tokens [ 0 ] == 'none' : # skip uninteresting entries continue assert ( len ( tokens ) == n ) d = { } for x in range ( 1 , len ( headers ) ) : d [ headers [ x ] ] = tokens [ x ] d [ 'Size' ] = float ( d [ 'Size' ] ) / unit assert ( d [ 'Capacity' ] . endswith ( "%" ) ) d [ 'Use%' ] = d [ 'Capacity' ] d [ 'Used' ] = float ( d [ 'Used' ] ) / unit d [ 'Available' ] = float ( d [ 'Available' ] ) / unit d [ 'Using' ] = 100 * ( d [ 'Used' ] / d [ 'Size' ] ) # same as Use% but with more precision if d [ 'Type' ] . startswith ( 'ext' ) : pass d [ 'Using' ] += 5 # ext2, ext3, and ext4 reserve 5% by default else : ext3_filesystems = [ 'ganon:' , 'kortemmelab:' , 'albana:' ] for e3fs in ext3_filesystems : if tokens [ 0 ] . find ( e3fs ) != - 1 : d [ 'Using' ] += 5 # ext3 reserves 5% break details [ tokens [ 0 ] ] = d return details
A wrapper for the df shell command .
525
8
12,949
def url_replace ( context , field , value ) : query_string = context [ 'request' ] . GET . copy ( ) query_string [ field ] = value return query_string . urlencode ( )
To avoid GET params losing
46
5
12,950
def ellipsis_or_number ( context , paginator , current_page ) : # Checks is it first page chosen_page = int ( context [ 'request' ] . GET [ 'page' ] ) if 'page' in context [ 'request' ] . GET else 1 if current_page in ( chosen_page + 1 , chosen_page + 2 , chosen_page - 1 , chosen_page - 2 , paginator . num_pages , paginator . num_pages - 1 , 1 , 2 , chosen_page ) : return current_page if current_page in ( chosen_page + 3 , chosen_page - 3 ) : return '...'
To avoid display a long pagination bar
142
8
12,951
def create_items ( sender , instance , * * kwargs ) : if instance . item_id is None and instance . item is None : item = Item ( ) if hasattr ( instance , 'active' ) : item . active = getattr ( instance , 'active' ) item . save ( ) instance . item = item
When one of the defined objects is created initialize also its item .
70
13
12,952
def add_parent ( sender , instance , * * kwargs ) : if not kwargs [ 'created' ] : return for att in [ 'task' , 'context' ] : parent = getattr ( instance , att ) . item_id child = instance . item_id ItemRelation . objects . get_or_create ( parent_id = parent , child_id = child , visible = True , )
When a task instance is created create also an item relation .
90
12
12,953
def change_parent ( sender , instance , * * kwargs ) : if instance . id is None : return if len ( { 'task' , 'task_id' } & set ( instance . changed_fields ) ) != 0 : diff = instance . diff parent = diff [ 'task' ] [ 0 ] if 'task' in diff else diff [ 'task_id' ] [ 0 ] parent_id = parent . item_id if isinstance ( parent , Task ) else Task . objects . get ( pk = parent ) . item_id child_id = instance . item_id ItemRelation . objects . filter ( parent_id = parent_id , child_id = child_id ) . delete ( ) ItemRelation . objects . create ( parent_id = instance . task . item_id , child_id = child_id , visible = True ) if len ( { 'context' , 'context_id' } & set ( instance . changed_fields ) ) != 0 : diff = instance . diff parent = diff [ 'context' ] [ 0 ] if 'context' in diff else diff [ 'context_id' ] [ 0 ] parent_id = parent . item_id if isinstance ( parent , Context ) else Context . objects . get ( pk = parent ) . item_id child_id = instance . item_id ItemRelation . objects . filter ( parent_id = parent_id , child_id = child_id ) . delete ( ) ItemRelation . objects . create ( parent_id = instance . context . item_id , child_id = child_id , visible = False )
When the given task instance has changed . Look at task and context and change the corresponding item relation .
350
20
12,954
def delete_parent ( sender , instance , * * kwargs ) : ItemRelation . objects . filter ( child_id = instance . item_id ) . delete ( )
When the given task instance is deleted delete also the corresponding item relations .
38
14
12,955
def align_to_other ( self , other , mapping , self_root_pair , other_root_pair = None ) : if other_root_pair == None : other_root_pair = self_root_pair assert ( len ( self_root_pair ) == len ( other_root_pair ) ) unmoved_atom_names = [ ] new_coords = [ None for x in xrange ( len ( self_root_pair ) ) ] for atom in self . names : if atom in self_root_pair : i = self_root_pair . index ( atom ) assert ( new_coords [ i ] == None ) new_coords [ i ] = self . get_coords_for_name ( atom ) if atom in mapping : other_atom = mapping [ atom ] self . set_coords_for_name ( atom , other . get_coords_for_name ( other_atom ) ) else : unmoved_atom_names . append ( atom ) # Move unmoved coordinates after all other atoms have been moved (so that # references will have been moved already) if None in new_coords : print new_coords assert ( None not in new_coords ) ref_coords = [ other . get_coords_for_name ( x ) for x in other_root_pair ] # Calculate translation and rotation matrices U , new_centroid , ref_centroid = calc_rotation_translation_matrices ( ref_coords , new_coords ) for atom in unmoved_atom_names : original_coord = self . get_coords_for_name ( atom ) self . set_coords_for_name ( atom , rotate_and_translate_coord ( original_coord , U , new_centroid , ref_centroid ) ) self . chain = other . chain
root atoms are atom which all other unmapped atoms will be mapped off of
403
15
12,956
def pumper ( html_generator ) : source = html_generator ( ) parser = etree . HTMLPullParser ( events = ( 'start' , 'end' ) , remove_comments = True ) while True : for element in parser . read_events ( ) : yield element try : parser . feed ( next ( source ) ) except StopIteration : # forces close of any unclosed tags parser . feed ( '</html>' ) for element in parser . read_events ( ) : yield element break
Pulls HTML from source generator feeds it to the parser and yields DOM elements .
109
16
12,957
def date_to_long_form_string ( dt , locale_ = 'en_US.utf8' ) : if locale_ : old_locale = locale . getlocale ( ) locale . setlocale ( locale . LC_ALL , locale_ ) v = dt . strftime ( "%A %B %d %Y" ) if locale_ : locale . setlocale ( locale . LC_ALL , old_locale ) return v
dt should be a datetime . date object .
99
10
12,958
def static_get_pdb_object ( pdb_id , bio_cache = None , cache_dir = None ) : pdb_id = pdb_id . upper ( ) if bio_cache : return bio_cache . get_pdb_object ( pdb_id ) if cache_dir : # Check to see whether we have a cached copy of the PDB file filepath = os . path . join ( cache_dir , '{0}.pdb' . format ( pdb_id ) ) if os . path . exists ( filepath ) : return PDB . from_filepath ( filepath ) # Get any missing files from the RCSB and create cached copies if appropriate pdb_contents = retrieve_pdb ( pdb_id ) if cache_dir : write_file ( os . path . join ( cache_dir , "%s.pdb" % pdb_id ) , pdb_contents ) return PDB ( pdb_contents )
This method does not necessarily use a BioCache but it seems to fit here .
215
16
12,959
def rebuild_app ( app_name , quiet = False , force = True , without_exec = False , restart = False ) : user = 'cozy-{app_name}' . format ( app_name = app_name ) home = '{prefix}/{app_name}' . format ( prefix = PREFIX , app_name = app_name ) command_line = 'cd {home}' . format ( home = home ) command_line += ' && git pull' if force : command_line += ' && ([ -d node_modules ] && rm -rf node_modules || true)' command_line += ' && ([ -d .node-gyp ] && rm -rf .node-gyp || true)' command_line += ' && ([ -d .npm ] && rm -rf .npm || true)' command_line += ' && chown -R {user}:{user} .' . format ( user = user ) command_line += ' && sudo -u {user} env HOME={home} npm install --production' . format ( user = user , home = home ) if restart : command_line += ' && cozy-monitor update {app_name}' . format ( app_name = app_name ) command_line += ' && cozy-monitor restart {app_name}' . format ( app_name = app_name ) if not quiet : print 'Execute:' print command_line if not without_exec : result = helpers . cmd_exec ( command_line ) print result [ 'stdout' ] print result [ 'stderr' ] print result [ 'error' ]
Rebuild cozy apps with deletion of npm directory & new npm build
352
13
12,960
def rebuild_all_apps ( force = True , restart = False ) : cozy_apps = monitor . status ( only_cozy = True ) for app in cozy_apps . keys ( ) : rebuild_app ( app , force = force , restart = restart )
Get all cozy apps & rebuild npm repository
56
8
12,961
def restart_stopped_apps ( ) : cozy_apps = monitor . status ( only_cozy = True ) for app in cozy_apps . keys ( ) : state = cozy_apps [ app ] if state == 'up' : next elif state == 'down' : print 'Start {}' . format ( app ) rebuild_app ( app , force = False ) monitor . start ( app )
Restart all apps in stopped state
86
7
12,962
def migrate_2_node4 ( ) : helpers . cmd_exec ( 'npm install -g cozy-monitor cozy-controller' , show_output = True ) helpers . cmd_exec ( 'update-cozy-stack' , show_output = True ) helpers . cmd_exec ( 'update-all' , show_output = True ) helpers . cmd_exec ( 'rm /etc/supervisor/conf.d/cozy-indexer.conf' , show_output = True ) helpers . cmd_exec ( 'supervisorctl reload' , show_output = True ) helpers . wait_cozy_stack ( ) ssl . normalize_cert_dir ( ) helpers . cmd_exec ( 'apt-get update' , show_output = True ) helpers . cmd_exec ( 'echo "cozy cozy/nodejs_apt_list text " | debconf-set-selections' , show_output = True ) helpers . cmd_exec ( 'apt-get install -y cozy-apt-node-list' , show_output = True ) helpers . cmd_exec ( 'apt-get update' , show_output = True ) helpers . cmd_exec ( 'apt-get remove -y nodejs-legacy' , show_output = True ) helpers . cmd_exec ( 'apt-get remove -y nodejs-dev' , show_output = True ) helpers . cmd_exec ( 'apt-get remove -y npm' , show_output = True ) helpers . cmd_exec ( 'apt-get install -y nodejs' , show_output = True ) helpers . cmd_exec ( 'apt-get install -y cozy' , show_output = True ) helpers . cmd_exec ( 'npm install -g cozy-monitor cozy-controller' , show_output = True ) rebuild_app ( 'data-system' ) rebuild_app ( 'home' ) rebuild_app ( 'proxy' ) helpers . cmd_exec ( 'supervisorctl restart cozy-controller' , show_output = True ) helpers . wait_cozy_stack ( ) rebuild_all_apps ( restart = True ) restart_stopped_apps ( ) helpers . cmd_exec ( 'apt-get install -y cozy' , show_output = True )
Migrate existing cozy to node4
493
7
12,963
def install_requirements ( ) : helpers . cmd_exec ( 'echo "cozy cozy/nodejs_apt_list text " | debconf-set-selections' , show_output = True ) helpers . cmd_exec ( 'apt-get install -y cozy-apt-node-list' , show_output = True ) helpers . cmd_exec ( 'apt-get update' , show_output = True ) command_line = 'apt-get install -y nodejs' command_line += ' && apt-get install -y cozy-depends' return_code = helpers . cmd_exec ( command_line , show_output = True ) if return_code != 0 : sys . exit ( return_code ) weboob . install ( )
Install cozy requirements
165
3
12,964
def add_message ( self , text , type = None ) : key = self . _msg_key self . setdefault ( key , [ ] ) self [ key ] . append ( message ( type , text ) ) self . save ( )
Add a message with an optional type .
51
8
12,965
def pop_messages ( self , type = None ) : key = self . _msg_key messages = [ ] if type is None : messages = self . pop ( key , [ ] ) else : keep_messages = [ ] for msg in self . get ( key , [ ] ) : if msg . type == type : messages . append ( msg ) else : keep_messages . append ( msg ) if not keep_messages and key in self : del self [ key ] else : self [ key ] = keep_messages if messages : self . save ( ) return messages
Retrieve stored messages and remove them from the session .
124
11
12,966
def vote_random ( candidates , votes , n_winners ) : rcands = list ( candidates ) shuffle ( rcands ) rcands = rcands [ : min ( n_winners , len ( rcands ) ) ] best = [ ( i , 0.0 ) for i in rcands ] return best
Select random winners from the candidates .
66
7
12,967
def vote_least_worst ( candidates , votes , n_winners ) : worsts = { str ( c ) : 100000000.0 for c in candidates } for v in votes : for e in v : if worsts [ str ( e [ 0 ] ) ] > e [ 1 ] : worsts [ str ( e [ 0 ] ) ] = e [ 1 ] s = sorted ( worsts . items ( ) , key = lambda x : x [ 1 ] , reverse = True ) best = s [ : min ( n_winners , len ( candidates ) ) ] d = [ ] for e in best : for c in candidates : if str ( c ) == e [ 0 ] : d . append ( ( c , e [ 1 ] ) ) return d
Select least worst artifact as the winner of the vote .
163
11
12,968
def vote_best ( candidates , votes , n_winners ) : best = [ votes [ 0 ] [ 0 ] ] for v in votes [ 1 : ] : if v [ 0 ] [ 1 ] > best [ 0 ] [ 1 ] : best = [ v [ 0 ] ] return best
Select the artifact with the single best evaluation as the winner of the vote .
62
15
12,969
def _remove_zeros ( votes , fpl , cl , ranking ) : for v in votes : for r in v : if r not in fpl : v . remove ( r ) for c in cl : if c not in fpl : if c not in ranking : ranking . append ( ( c , 0 ) )
Remove zeros in IRV voting .
68
8
12,970
def _remove_last ( votes , fpl , cl , ranking ) : for v in votes : for r in v : if r == fpl [ - 1 ] : v . remove ( r ) for c in cl : if c == fpl [ - 1 ] : if c not in ranking : ranking . append ( ( c , len ( ranking ) + 1 ) )
Remove last candidate in IRV voting .
78
8
12,971
def vote_IRV ( candidates , votes , n_winners ) : # TODO: Check what is wrong in here. votes = [ [ e [ 0 ] for e in v ] for v in votes ] f = lambda x : Counter ( e [ 0 ] for e in x ) . most_common ( ) cl = list ( candidates ) ranking = [ ] fp = f ( votes ) fpl = [ e [ 0 ] for e in fp ] while len ( fpl ) > 1 : _remove_zeros ( votes , fpl , cl , ranking ) _remove_last ( votes , fpl , cl , ranking ) cl = fpl [ : - 1 ] fp = f ( votes ) fpl = [ e [ 0 ] for e in fp ] ranking . append ( ( fpl [ 0 ] , len ( ranking ) + 1 ) ) ranking = list ( reversed ( ranking ) ) return ranking [ : min ( n_winners , len ( ranking ) ) ]
Perform IRV voting based on votes .
211
9
12,972
def vote_mean ( candidates , votes , n_winners ) : sums = { str ( candidate ) : [ ] for candidate in candidates } for vote in votes : for v in vote : sums [ str ( v [ 0 ] ) ] . append ( v [ 1 ] ) for s in sums : sums [ s ] = sum ( sums [ s ] ) / len ( sums [ s ] ) ordering = list ( sums . items ( ) ) ordering . sort ( key = operator . itemgetter ( 1 ) , reverse = True ) best = ordering [ : min ( n_winners , len ( ordering ) ) ] d = [ ] for e in best : for c in candidates : if str ( c ) == e [ 0 ] : d . append ( ( c , e [ 1 ] ) ) return d
Perform mean voting based on votes .
170
8
12,973
def vote ( self , candidates ) : ranks = [ ( c , self . evaluate ( c ) [ 0 ] ) for c in candidates ] ranks . sort ( key = operator . itemgetter ( 1 ) , reverse = True ) return ranks
Rank artifact candidates .
50
4
12,974
def add_candidate ( self , artifact ) : self . candidates . append ( artifact ) self . _log ( logging . DEBUG , "CANDIDATES appended:'{}'" . format ( artifact ) )
Add candidate artifact to the list of current candidates .
45
10
12,975
def validate_candidates ( self , candidates ) : valid_candidates = set ( candidates ) for a in self . get_agents ( addr = False ) : vc = set ( a . validate ( candidates ) ) valid_candidates = valid_candidates . intersection ( vc ) return list ( valid_candidates )
Validate the candidate artifacts with the agents in the environment .
69
12
12,976
def gather_votes ( self , candidates ) : votes = [ ] for a in self . get_agents ( addr = False ) : vote = a . vote ( candidates ) votes . append ( vote ) return votes
Gather votes for the given candidates from the agents in the environment .
44
14
12,977
def get_managers ( self ) : if self . _single_env : return None if not hasattr ( self , '_managers' ) : self . _managers = self . env . get_slave_managers ( ) return self . _managers
Get managers for the slave environments .
57
7
12,978
def gather_votes ( self ) : async def slave_task ( addr , candidates ) : r_manager = await self . env . connect ( addr ) return await r_manager . gather_votes ( candidates ) if len ( self . candidates ) == 0 : self . _log ( logging . DEBUG , "Could not gather votes because there " "are no candidates!" ) self . _votes = [ ] return self . _log ( logging . DEBUG , "Gathering votes for {} candidates." . format ( len ( self . candidates ) ) ) if self . _single_env : self . _votes = self . env . gather_votes ( self . candidates ) else : mgrs = self . get_managers ( ) tasks = create_tasks ( slave_task , mgrs , self . candidates ) self . _votes = run ( tasks )
Gather votes from all the underlying slave environments for the current list of candidates .
180
16
12,979
def gather_candidates ( self ) : async def slave_task ( addr ) : r_manager = await self . env . connect ( addr ) return await r_manager . get_candidates ( ) if self . _single_env : self . _candidates = self . env . candidates else : mgrs = self . get_managers ( ) tasks = create_tasks ( slave_task , mgrs ) self . _candidates = run ( tasks )
Gather candidates from the slave environments .
101
8
12,980
def clear_candidates ( self , clear_env = True ) : async def slave_task ( addr ) : r_manager = await self . env . connect ( addr ) return await r_manager . clear_candidates ( ) self . _candidates = [ ] if clear_env : if self . _single_env : self . env . clear_candidates ( ) else : mgrs = self . get_managers ( ) run ( create_tasks ( slave_task , mgrs ) )
Clear the current candidates .
110
5
12,981
def validate_candidates ( self ) : async def slave_task ( addr , candidates ) : r_manager = await self . env . connect ( addr ) return await r_manager . validate_candidates ( candidates ) self . _log ( logging . DEBUG , "Validating {} candidates" . format ( len ( self . candidates ) ) ) candidates = self . candidates if self . _single_env : self . _candidates = self . env . validate_candidates ( candidates ) else : mgrs = self . get_managers ( ) tasks = create_tasks ( slave_task , mgrs , candidates , flatten = False ) rets = run ( tasks ) valid_candidates = set ( self . candidates ) for r in rets : valid_candidates = valid_candidates . intersection ( set ( r ) ) self . _candidates = list ( valid_candidates ) self . _log ( logging . DEBUG , "{} candidates after validation" . format ( len ( self . candidates ) ) )
Validate current candidates .
218
5
12,982
def gather_and_vote ( self , voting_method , validate = False , winners = 1 , * * kwargs ) : self . gather_candidates ( ) if validate : self . validate_candidates ( ) self . gather_votes ( ) r = self . compute_results ( voting_method , self . votes , winners = winners , * * kwargs ) return r
Convenience function to gathering candidates and votes and performing voting using them .
82
15
12,983
def start_device ( name , frontend , backend ) : device = getattr ( devices , name ) device ( frontend , backend )
Start specified device
29
3
12,984
def start ( self , transaction_context = None ) : transaction_context = transaction_context or { } context_cmd = { 'command' : 'set_transaction_context' , 'msg' : transaction_context } self . publish ( context_cmd ) self . publish ( self . START )
Publish start message to all turrets
64
7
12,985
def process_message ( self , message , is_started = False ) : if not self . master : return False if 'status' not in message : return False message [ 'name' ] = message [ 'turret' ] del message [ 'turret' ] if not self . add ( message , is_started ) : return self . update ( message ) return True
Process incomming message from turret
78
7
12,986
def add ( self , turret_data , is_started = False ) : if turret_data . get ( 'uuid' ) in self . turrets : return False turret = Turret ( * * turret_data ) self . write ( turret ) self . turrets [ turret . uuid ] = turret if is_started : self . publish ( self . START , turret . uuid ) return True
Add a turret object to current turrets configuration
83
8
12,987
def update ( self , turret_data ) : if turret_data . get ( 'uuid' ) not in self . turrets : return False turret = self . turrets [ turret_data . get ( 'uuid' ) ] turret . update ( * * turret_data ) self . write ( turret ) return True
Update a given turret
66
4
12,988
def publish ( self , message , channel = None ) : if not self . master : return channel = channel or '' data = json . dumps ( message ) self . publisher . send_string ( "%s %s" % ( channel , data ) )
Publish a message for all turrets
52
7
12,989
def open_recruitment ( self , n = 1 ) : from psiturk . amt_services import MTurkServices , RDSServices from psiturk . psiturk_shell import PsiturkNetworkShell from psiturk . psiturk_org_services import PsiturkOrgServices psiturk_access_key_id = os . getenv ( "psiturk_access_key_id" , self . config . get ( "psiTurk Access" , "psiturk_access_key_id" ) ) psiturk_secret_access_id = os . getenv ( "psiturk_secret_access_id" , self . config . get ( "psiTurk Access" , "psiturk_secret_access_id" ) ) web_services = PsiturkOrgServices ( psiturk_access_key_id , psiturk_secret_access_id ) aws_rds_services = RDSServices ( self . aws_access_key_id , self . aws_secret_access_key , self . aws_region ) self . amt_services = MTurkServices ( self . aws_access_key_id , self . aws_secret_access_key , self . config . getboolean ( 'Shell Parameters' , 'launch_in_sandbox_mode' ) ) self . shell = PsiturkNetworkShell ( self . config , self . amt_services , aws_rds_services , web_services , self . server , self . config . getboolean ( 'Shell Parameters' , 'launch_in_sandbox_mode' ) ) try : participants = Participant . query . all ( ) assert ( participants ) except Exception : # Create the first HIT. self . shell . hit_create ( n , self . config . get ( 'HIT Configuration' , 'base_payment' ) , self . config . get ( 'HIT Configuration' , 'duration' ) ) else : # HIT was already created, no need to recreate it. print "Reject recruitment reopening: experiment has started."
Open recruitment for the first HIT unless it s already open .
479
12
12,990
def approve_hit ( self , assignment_id ) : from psiturk . amt_services import MTurkServices self . amt_services = MTurkServices ( self . aws_access_key_id , self . aws_secret_access_key , self . config . getboolean ( 'Shell Parameters' , 'launch_in_sandbox_mode' ) ) return self . amt_services . approve_worker ( assignment_id )
Approve the HIT .
102
6
12,991
def reward_bonus ( self , assignment_id , amount , reason ) : from psiturk . amt_services import MTurkServices self . amt_services = MTurkServices ( self . aws_access_key_id , self . aws_secret_access_key , self . config . getboolean ( 'Shell Parameters' , 'launch_in_sandbox_mode' ) ) return self . amt_services . bonus_worker ( assignment_id , amount , reason )
Reward the Turker with a bonus .
111
8
12,992
def keys ( cls ) : if cls . _cache_keys is None : cls . _cache_keys = [ c . name for c in cls . __table__ . _columns ] return cls . _cache_keys
return list of all declared columns .
52
7
12,993
def random ( cls , engine_or_session , limit = 5 ) : ses , auto_close = ensure_session ( engine_or_session ) result = ses . query ( cls ) . order_by ( func . random ( ) ) . limit ( limit ) . all ( ) if auto_close : # pragma: no cover ses . close ( ) return result
Return random ORM instance .
83
6
12,994
def main ( ) : import argparse from pkg_resources import require parser = argparse . ArgumentParser ( description = main . __doc__ ) parser . add_argument ( "data" , nargs = argparse . REMAINDER , help = "Floating point data, any delimiter." ) parser . add_argument ( "--version" , "-v" , action = "store_true" , help = "Display the version number and exit." ) args = parser . parse_args ( ) if args . version : version = require ( "pysparklines" ) [ 0 ] . version print ( version ) sys . exit ( 0 ) if os . isatty ( 0 ) and not args . data : parser . print_help ( ) sys . exit ( 1 ) elif args . data : arg_string = u' ' . join ( args . data ) else : arg_string = sys . stdin . read ( ) try : output = sparkify ( guess_series ( arg_string ) ) except : sys . stderr . write ( "Could not convert input data to valid sparkline\n" ) sys . exit ( 1 ) print ( output . encode ( 'utf-8' , 'ignore' ) )
u Reads from command line args or stdin and prints a sparkline from the data . Requires at least 2 data points as input .
264
28
12,995
def is_active ( self , state : 'State' ) -> bool : # Remove the genes which does not contribute to the multiplex sub_state = state . sub_state_by_gene_name ( * self . expression . variables ) # If this state is not in the cache if sub_state not in self . _is_active : params = self . _transform_state_to_dict ( sub_state ) # We add the result of the expression for this state of the multiplex to the cache self . _is_active [ sub_state ] = self . expression . evaluate ( * * params ) return self . _is_active [ sub_state ]
Return True if the multiplex is active in the given state false otherwise .
143
15
12,996
def get ( self , key ) : try : layers = key . split ( '.' ) value = self . registrar for key in layers : value = value [ key ] return value except : return None
Function deeply gets the key with . notation
42
8
12,997
def set ( self , key , value ) : target = self . registrar for element in key . split ( '.' ) [ : - 1 ] : target = target . setdefault ( element , dict ( ) ) target [ key . split ( "." ) [ - 1 ] ] = value
Function deeply sets the key with . notation
61
8
12,998
def boot ( cls , * args , * * kwargs ) : if cls . accessor is not None : if cls . instance is None : cls . instance = cls . accessor ( * args , * * kwargs )
Function creates the instance of accessor with dynamic positional & keyword arguments .
54
14
12,999
def register ( cls , config = { } ) : if cls . accessor is not None : if cls . instance is None : cls . instance = cls . accessor ( config )
This function is basically a shortcut of boot for accessors that have only the config dict argument .
43
19