idx (int64, 0-251k) | question (string, 53-3.53k chars) | target (string, 5-1.23k chars) | len_question (int64, 20-893) | len_target (int64, 3-238) |
|---|---|---|---|---|
2,100 | def tearpage ( filename , bibtex = None , force = None ) : # Fetch pages to tear pages_to_tear = [ ] if force is not None : pages_to_tear = force elif bibtex is not None : pages_to_tear = tearpage_needed ( bibtex ) if len ( pages_to_tear ) > 0 : # If tearing is needed, do it and return True tearpage_backend ( filename , teared_pages = pages_to_tear ) return True # Else, simply return False return False | Tear some pages of the file if needed . | 125 | 10 |
2,101 | def edit ( filename , connection = None ) : c = connection or connect ( ) rev = c . ls ( filename ) if rev : rev [ 0 ] . edit ( ) | Checks out a file into the default changelist | 36 | 10 |
2,102 | def sync ( filename , connection = None ) : c = connection or connect ( ) rev = c . ls ( filename ) if rev : rev [ 0 ] . sync ( ) | Syncs a file | 36 | 4 |
2,103 | def open ( filename , connection = None ) : c = connection or connect ( ) res = c . ls ( filename ) if res and res [ 0 ] . revision : res [ 0 ] . edit ( ) else : c . add ( filename ) | Edits or Adds a filename ensuring the file is in perforce and editable | 51 | 16 |
2,104 | def is_valid ( arxiv_id ) : match = REGEX . match ( arxiv_id ) return ( match is not None ) and ( match . group ( 0 ) == arxiv_id ) | Check that a given arXiv ID is a valid one . | 47 | 13 |
2,105 | def get_bibtex ( arxiv_id ) : # Fetch bibtex using arxiv2bib module try : bibtex = arxiv2bib . arxiv2bib ( [ arxiv_id ] ) except HTTPError : bibtex = [ ] for bib in bibtex : if isinstance ( bib , arxiv2bib . ReferenceErrorInfo ) : continue else : # Return fetched bibtex return bib . bibtex ( ) # An error occurred, return None return None | Get a BibTeX entry for a given arXiv ID . | 121 | 13 |
2,106 | def extract_from_text ( text ) : # Remove the leading "arxiv:". return tools . remove_duplicates ( [ re . sub ( "arxiv:" , "" , i [ 0 ] , flags = re . IGNORECASE ) for i in REGEX . findall ( text ) if i [ 0 ] != '' ] ) | Extract arXiv IDs from a text . | 76 | 10 |
2,107 | def from_doi ( doi ) : try : request = requests . get ( "http://export.arxiv.org/api/query" , params = { "search_query" : "doi:%s" % ( doi , ) , "max_results" : 1 } ) request . raise_for_status ( ) except RequestException : return None root = xml . etree . ElementTree . fromstring ( request . content ) for entry in root . iter ( "{http://www.w3.org/2005/Atom}entry" ) : arxiv_id = entry . find ( "{http://www.w3.org/2005/Atom}id" ) . text # arxiv_id is an arXiv full URL. We only want the id which is the last # URL component. return arxiv_id . split ( "/" ) [ - 1 ] return None | Get the arXiv eprint id for a given DOI . | 195 | 13 |
2,108 | def get_sources ( arxiv_id ) : try : request = requests . get ( ARXIV_EPRINT_URL . format ( arxiv_id = arxiv_id ) ) request . raise_for_status ( ) file_object = io . BytesIO ( request . content ) return tarfile . open ( fileobj = file_object ) except ( RequestException , AssertionError , tarfile . TarError ) : return None | Download sources on arXiv for a given preprint . | 101 | 12 |
2,109 | def extractDates ( inp , tz = None , now = None ) : service = DateService ( tz = tz , now = now ) return service . extractDates ( inp ) | Extract semantic date information from an input string . This is a convenience method which would only be used if you d rather not initialize a DateService object . | 43 | 31 |
2,110 | def extractTimes ( self , inp ) : def handleMatch ( time ) : relative = False if not time : return None # Default times: 8am, 12pm, 7pm elif time . group ( 1 ) == 'morning' : h = 8 m = 0 elif time . group ( 1 ) == 'afternoon' : h = 12 m = 0 elif time . group ( 1 ) == 'evening' : h = 19 m = 0 elif time . group ( 4 ) and time . group ( 5 ) : h , m = 0 , 0 # Extract hours difference converter = NumberService ( ) try : diff = converter . parse ( time . group ( 4 ) ) except : return None if time . group ( 5 ) == 'hours' : h += diff else : m += diff # Extract minutes difference if time . group ( 6 ) : converter = NumberService ( ) try : diff = converter . parse ( time . group ( 7 ) ) except : return None if time . group ( 8 ) == 'hours' : h += diff else : m += diff relative = True else : # Convert from "HH:MM pm" format t = time . group ( 2 ) h , m = int ( t . split ( ':' ) [ 0 ] ) % 12 , int ( t . split ( ':' ) [ 1 ] ) try : if time . group ( 3 ) == 'pm' : h += 12 except IndexError : pass if relative : return self . now + datetime . timedelta ( hours = h , minutes = m ) else : return datetime . datetime ( self . now . year , self . now . month , self . now . day , h , m ) inp = self . _preprocess ( inp ) return [ handleMatch ( time ) for time in self . _timeRegex . finditer ( inp ) ] | Extracts time - related information from an input string . Ignores any information related to the specific date focusing on the time - of - day . | 393 | 30 |
2,111 | def extractDates ( self , inp ) : def merge ( param ) : day , time = param if not ( day or time ) : return None if not day : return time if not time : return day return datetime . datetime ( day . year , day . month , day . day , time . hour , time . minute ) days = self . extractDays ( inp ) times = self . extractTimes ( inp ) return map ( merge , zip_longest ( days , times , fillvalue = None ) ) | Extract semantic date information from an input string . In effect runs both parseDay and parseTime on the input string and merges the results to produce a comprehensive datetime object . | 111 | 36 |
2,112 | def extractDate ( self , inp ) : dates = self . extractDates ( inp ) for date in dates : return date return None | Returns the first date found in the input string or None if not found . | 30 | 15 |
2,113 | def convertDay ( self , day , prefix = "" , weekday = False ) : def sameDay ( d1 , d2 ) : d = d1 . day == d2 . day m = d1 . month == d2 . month y = d1 . year == d2 . year return d and m and y tom = self . now + datetime . timedelta ( days = 1 ) if sameDay ( day , self . now ) : return "today" elif sameDay ( day , tom ) : return "tomorrow" if weekday : dayString = day . strftime ( "%A, %B %d" ) else : dayString = day . strftime ( "%B %d" ) # Ex) Remove '0' from 'August 03' if not int ( dayString [ - 2 ] ) : dayString = dayString [ : - 2 ] + dayString [ - 1 ] return prefix + " " + dayString | Convert a datetime object representing a day into a human - ready string that can be read spoken aloud etc . | 198 | 23 |
2,114 | def convertTime ( self , time ) : # if ':00', ignore reporting minutes m_format = "" if time . minute : m_format = ":%M" timeString = time . strftime ( "%I" + m_format + " %p" ) # if '07:30', cast to '7:30' if not int ( timeString [ 0 ] ) : timeString = timeString [ 1 : ] return timeString | Convert a datetime object representing a time into a human - ready string that can be read spoken aloud etc . | 95 | 23 |
2,115 | def convertDate ( self , date , prefix = "" , weekday = False ) : dayString = self . convertDay ( date , prefix = prefix , weekday = weekday ) timeString = self . convertTime ( date ) return dayString + " at " + timeString | Convert a datetime object representing into a human - ready string that can be read spoken aloud etc . In effect runs both convertDay and convertTime on the input merging the results . | 55 | 37 |
2,116 | def _move ( self ) : newpath = self . action [ 'newpath' ] try : self . fs . move ( self . fp , newpath ) except OSError : raise tornado . web . HTTPError ( 400 ) return newpath | Called during a PUT request where the action specifies a move operation . Returns resource URI of the destination file . | 54 | 23 |
2,117 | def _copy ( self ) : copypath = self . action [ 'copypath' ] try : self . fs . copy ( self . fp , copypath ) except OSError : raise tornado . web . HTTPError ( 400 ) return copypath | Called during a PUT request where the action specifies a copy operation . Returns resource URI of the new file . | 58 | 23 |
2,118 | def _rename ( self ) : newname = self . action [ 'newname' ] try : newpath = self . fs . rename ( self . fp , newname ) except OSError : raise tornado . web . HTTPError ( 400 ) return newpath | Called during a PUT request where the action specifies a rename operation . Returns resource URI of the renamed file . | 58 | 23 |
2,119 | def get ( self ) : res = self . fs . get_filesystem_details ( ) res = res . to_dict ( ) self . write ( res ) | Return details for the filesystem including configured volumes . | 35 | 9 |
2,120 | def put ( self ) : self . fp = self . get_body_argument ( 'filepath' ) self . action = self . get_body_argument ( 'action' ) try : ptype = self . fs . get_type_from_path ( self . fp ) except OSError : raise tornado . web . HTTPError ( 404 ) if ptype == 'directory' : self . handler_name = 'filesystem:directories-details' else : self . handler_name = 'filesystem:files-details' if self . action [ 'action' ] == 'move' : newpath = self . _move ( ) self . write ( { 'filepath' : newpath } ) elif self . action [ 'action' ] == 'copy' : newpath = self . _copy ( ) self . write ( { 'filepath' : newpath } ) elif self . action [ 'action' ] == 'rename' : newpath = self . _rename ( ) self . write ( { 'filepath' : newpath } ) else : raise tornado . web . HTTPError ( 400 ) | Provides move copy and rename functionality . An action must be specified when calling this method . | 244 | 18 |
2,121 | def post ( self , * args ) : filepath = self . get_body_argument ( 'filepath' ) if not self . fs . exists ( filepath ) : raise tornado . web . HTTPError ( 404 ) Filewatcher . add_directory_to_watch ( filepath ) self . write ( { 'msg' : 'Watcher added for {}' . format ( filepath ) } ) | Start a new filewatcher at the specified path . | 86 | 11 |
2,122 | def delete ( self , filepath ) : Filewatcher . remove_directory_to_watch ( filepath ) self . write ( { 'msg' : 'Watcher deleted for {}' . format ( filepath ) } ) | Stop and delete the specified filewatcher . | 48 | 9 |
2,123 | def get ( self , filepath ) : try : res = self . fs . get_file_details ( filepath ) res = res . to_dict ( ) self . write ( res ) except OSError : raise tornado . web . HTTPError ( 404 ) | Get file details for the specified file . | 57 | 8 |
2,124 | def put ( self , filepath ) : action = self . get_body_argument ( 'action' ) if action [ 'action' ] == 'update_group' : newgrp = action [ 'group' ] try : self . fs . update_group ( filepath , newgrp ) self . write ( { 'msg' : 'Updated group for {}' . format ( filepath ) } ) except OSError : raise tornado . web . HTTPError ( 404 ) elif action [ 'action' ] == 'update_permissions' : newperms = action [ 'permissions' ] try : self . fs . update_permissions ( filepath , newperms ) self . write ( { 'msg' : 'Updated permissions for {}' . format ( filepath ) } ) except OSError : raise tornado . web . HTTPError ( 404 ) else : raise tornado . web . HTTPError ( 400 ) | Change the group or permissions of the specified file . Action must be specified when calling this method . | 199 | 19 |
2,125 | def delete ( self , filepath ) : try : self . fs . delete ( filepath ) self . write ( { 'msg' : 'File deleted at {}' . format ( filepath ) } ) except OSError : raise tornado . web . HTTPError ( 404 ) | Delete the specified file . | 59 | 5 |
2,126 | def post ( self ) : filepath = self . get_body_argument ( 'filepath' ) try : self . fs . create_directory ( filepath ) encoded_filepath = tornado . escape . url_escape ( filepath , plus = True ) resource_uri = self . reverse_url ( 'filesystem:directories-details' , encoded_filepath ) self . write ( { 'uri' : resource_uri } ) except OSError : raise tornado . web . HTTPError ( 404 ) | Create a new directory at the specified path . | 110 | 9 |
2,127 | def get ( self , filepath ) : exists = self . fs . exists ( filepath ) if exists : mime = magic . Magic ( mime = True ) mime_type = mime . from_file ( filepath ) if mime_type in self . unsupported_types : self . set_status ( 204 ) return else : contents = self . fs . read_file ( filepath ) self . write ( { 'filepath' : filepath , 'contents' : contents } ) else : raise tornado . web . HTTPError ( 404 ) | Get the contents of the specified file . | 119 | 8 |
2,128 | def post ( self , filepath ) : try : content = self . get_body_argument ( 'content' ) self . fs . write_file ( filepath , content ) self . write ( { 'msg' : 'Updated file at {}' . format ( filepath ) } ) except OSError : raise tornado . web . HTTPError ( 404 ) | Write the given contents to the specified file . This is not an append all file contents will be replaced by the contents given . | 77 | 25 |
2,129 | def get_content ( self , start = None , end = None ) : with open ( self . filepath , "rb" ) as file : if start is not None : file . seek ( start ) if end is not None : remaining = end - ( start or 0 ) else : remaining = None while True : chunk_size = 64 * 1024 if remaining is not None and remaining < chunk_size : chunk_size = remaining chunk = file . read ( chunk_size ) if chunk : if remaining is not None : remaining -= len ( chunk ) yield chunk else : if remaining is not None : assert remaining == 0 return | Retrieve the content of the requested resource which is located at the given absolute path . This method should either return a byte string or an iterator of byte strings . The latter is preferred for large files as it helps reduce memory fragmentation . | 130 | 46 |
2,130 | def set_headers ( self ) : self . set_header ( "Accept-Ranges" , "bytes" ) content_type = self . get_content_type ( ) if content_type : self . set_header ( "Content-Type" , content_type ) | Sets the content headers on the response . | 59 | 9 |
2,131 | def __deactivate_shared_objects ( self , plugin , * args , * * kwargs ) : shared_objects = self . get ( ) for shared_object in shared_objects . keys ( ) : self . unregister ( shared_object ) | Callback which gets executed if the signal plugin_deactivate_post was send by the plugin . | 54 | 19 |
2,132 | def get ( self , name = None ) : return self . app . shared_objects . get ( name , self . plugin ) | Returns requested shared objects which were registered by the current plugin . | 27 | 12 |
2,133 | def get ( self , name = None , plugin = None ) : if plugin is not None : if name is None : shared_objects_list = { } for key in self . _shared_objects . keys ( ) : if self . _shared_objects [ key ] . plugin == plugin : shared_objects_list [ key ] = self . _shared_objects [ key ] return shared_objects_list else : if name in self . _shared_objects . keys ( ) : if self . _shared_objects [ name ] . plugin == plugin : return self . _shared_objects [ name ] else : return None else : return None else : if name is None : return self . _shared_objects else : if name in self . _shared_objects . keys ( ) : return self . _shared_objects [ name ] else : return None | Returns requested shared objects . | 179 | 5 |
2,134 | def unregister ( self , shared_object ) : if shared_object not in self . _shared_objects . keys ( ) : self . log . warning ( "Can not unregister shared object %s" % shared_object ) else : del ( self . _shared_objects [ shared_object ] ) self . log . debug ( "Shared object %s got unregistered" % shared_object ) | Unregisters an existing shared object so that this shared object is no longer available . | 86 | 17 |
2,135 | def list_signals ( self ) : print ( "Signal list" ) print ( "***********\n" ) for key , signal in self . app . signals . signals . items ( ) : print ( "%s (%s)\n %s\n" % ( signal . name , signal . plugin . name , signal . description ) ) | Prints a list of all registered signals . Including description and plugin name . | 73 | 15 |
2,136 | def list_receivers ( self ) : print ( "Receiver list" ) print ( "*************\n" ) for key , receiver in self . app . signals . receivers . items ( ) : print ( "%s <-- %s (%s):\n %s\n" % ( receiver . name , receiver . signal , receiver . plugin . name , receiver . description ) ) | Prints a list of all registered receivers . Including signal plugin name and description . | 83 | 16 |
2,137 | def toxcmd_main ( args = None ) : usage = "USAGE: %(prog)s [OPTIONS] COMMAND args..." if args is None : args = sys . argv [ 1 : ] # -- STEP: Build command-line parser. parser = argparse . ArgumentParser ( description = inspect . getdoc ( toxcmd_main ) , formatter_class = FORMATTER_CLASS ) common_parser = parser . add_argument_group ( "Common options" ) common_parser . add_argument ( "--version" , action = "version" , version = VERSION ) subparsers = parser . add_subparsers ( help = "commands" ) for command in discover_commands ( ) : command_parser = subparsers . add_parser ( command . name , usage = command . usage , description = command . description , help = command . short_description , formatter_class = FORMATTER_CLASS ) command_parser . set_defaults ( func = command ) command . setup_parser ( command_parser ) command . parser = command_parser # -- STEP: Process command-line and run command. options = parser . parse_args ( args ) command_function = options . func return command_function ( options ) | Command util with subcommands for tox environments . | 273 | 10 |
2,138 | def discharge ( ctx , id , caveat , key , checker , locator ) : caveat_id_prefix = [ ] if caveat is None : # The caveat information is encoded in the id itself. caveat = id else : # We've been given an explicit id, so when extra third party # caveats are added, use that id as the prefix # for any more ids. caveat_id_prefix = id cav_info = decode_caveat ( key , caveat ) cav_info = ThirdPartyCaveatInfo ( condition = cav_info . condition , first_party_public_key = cav_info . first_party_public_key , third_party_key_pair = cav_info . third_party_key_pair , root_key = cav_info . root_key , caveat = cav_info . caveat , version = cav_info . version , id = id , namespace = cav_info . namespace ) # Note that we don't check the error - we allow the # third party checker to see even caveats that we can't # understand. try : cond , arg = checkers . parse_caveat ( cav_info . condition ) except ValueError as exc : raise VerificationError ( exc . args [ 0 ] ) if cond == checkers . COND_NEED_DECLARED : cav_info = cav_info . _replace ( condition = arg ) caveats = _check_need_declared ( ctx , cav_info , checker ) else : caveats = checker . check_third_party_caveat ( ctx , cav_info ) # Note that the discharge macaroon does not need to # be stored persistently. Indeed, it would be a problem if # we did, because then the macaroon could potentially be used # for normal authorization with the third party. m = Macaroon ( cav_info . root_key , id , '' , cav_info . version , cav_info . namespace , ) m . _caveat_id_prefix = caveat_id_prefix if caveats is not None : for cav in caveats : m . add_caveat ( cav , key , locator ) return m | Creates a macaroon to discharge a third party caveat . | 468 | 13 |
2,139 | def local_third_party_caveat ( key , version ) : if version >= VERSION_2 : loc = 'local {} {}' . format ( version , key ) else : loc = 'local {}' . format ( key ) return checkers . Caveat ( location = loc , condition = '' ) | Returns a third - party caveat that when added to a macaroon with add_caveat results in a caveat with the location local encrypted with the given PublicKey . This can be automatically discharged by discharge_all passing a local key . | 66 | 49 |
2,140 | def deserialize_namespace ( data ) : if isinstance ( data , bytes ) : data = data . decode ( 'utf-8' ) kvs = data . split ( ) uri_to_prefix = { } for kv in kvs : i = kv . rfind ( ':' ) if i == - 1 : raise ValueError ( 'no colon in namespace ' 'field {}' . format ( repr ( kv ) ) ) uri , prefix = kv [ 0 : i ] , kv [ i + 1 : ] if not is_valid_schema_uri ( uri ) : # Currently this can't happen because the only invalid URIs # are those which contain a space raise ValueError ( 'invalid URI {} in namespace ' 'field {}' . format ( repr ( uri ) , repr ( kv ) ) ) if not is_valid_prefix ( prefix ) : raise ValueError ( 'invalid prefix {} in namespace field' ' {}' . format ( repr ( prefix ) , repr ( kv ) ) ) if uri in uri_to_prefix : raise ValueError ( 'duplicate URI {} in ' 'namespace {}' . format ( repr ( uri ) , repr ( data ) ) ) uri_to_prefix [ uri ] = prefix return Namespace ( uri_to_prefix ) | Deserialize a Namespace object . | 292 | 8 |
2,141 | def serialize_text ( self ) : if self . _uri_to_prefix is None or len ( self . _uri_to_prefix ) == 0 : return b'' od = collections . OrderedDict ( sorted ( self . _uri_to_prefix . items ( ) ) ) data = [ ] for uri in od : data . append ( uri + ':' + od [ uri ] ) return ' ' . join ( data ) . encode ( 'utf-8' ) | Returns a serialized form of the Namepace . | 106 | 10 |
2,142 | def register ( self , uri , prefix ) : if not is_valid_schema_uri ( uri ) : raise KeyError ( 'cannot register invalid URI {} (prefix {})' . format ( uri , prefix ) ) if not is_valid_prefix ( prefix ) : raise ValueError ( 'cannot register invalid prefix %q for URI %q' . format ( prefix , uri ) ) if self . _uri_to_prefix . get ( uri ) is None : self . _uri_to_prefix [ uri ] = prefix | Registers the given URI and associates it with the given prefix . | 119 | 13 |
2,143 | def with_value ( self , key , val ) : new_dict = dict ( self . _dict ) new_dict [ key ] = val return AuthContext ( new_dict ) | Return a copy of the AuthContext object with the given key and value added . | 39 | 16 |
2,144 | def make_pattern ( self , pattern , listsep = ',' ) : if self is Cardinality . one : return pattern elif self is Cardinality . zero_or_one : return self . schema % pattern else : return self . schema % ( pattern , listsep , pattern ) | Make pattern for a data type with the specified cardinality . | 62 | 12 |
2,145 | def with_cardinality ( cls , cardinality , converter , pattern = None , listsep = ',' ) : if cardinality is Cardinality . one : return converter # -- NORMAL-CASE builder_func = getattr ( cls , "with_%s" % cardinality . name ) if cardinality is Cardinality . zero_or_one : return builder_func ( converter , pattern ) else : # -- MANY CASE: 0..*, 1..* return builder_func ( converter , pattern , listsep = listsep ) | Creates a type converter for the specified cardinality by using the type converter for T . | 120 | 18 |
2,146 | def with_zero_or_one ( cls , converter , pattern = None ) : cardinality = Cardinality . zero_or_one if not pattern : pattern = getattr ( converter , "pattern" , cls . default_pattern ) optional_pattern = cardinality . make_pattern ( pattern ) group_count = cardinality . compute_group_count ( pattern ) def convert_optional ( text , m = None ) : if text : text = text . strip ( ) if not text : return None return converter ( text ) convert_optional . pattern = optional_pattern # OLD: convert_optional.group_count = group_count convert_optional . regex_group_count = group_count return convert_optional | Creates a type converter for a T with 0 .. 1 times by using the type converter for one item of T . | 154 | 24 |
2,147 | def server_static ( filepath ) : mimetype = "image/svg+xml" if filepath . endswith ( ".svg" ) else "auto" return bottle . static_file ( filepath , root = conf . StaticPath , mimetype = mimetype ) | Handler for serving static files . | 63 | 6 |
2,148 | def mouse ( table , day = None ) : where = ( ( "day" , day ) , ) if day else ( ) events = db . fetch ( table , where = where , order = "day" ) for e in events : e [ "dt" ] = datetime . datetime . fromtimestamp ( e [ "stamp" ] ) stats , positions , events = stats_mouse ( events , table ) days , input = db . fetch ( "counts" , order = "day" , type = table ) , "mouse" return bottle . template ( "heatmap.tpl" , locals ( ) , conf = conf ) | Handler for showing mouse statistics for specified type and day . | 137 | 11 |
2,149 | def keyboard ( table , day = None ) : cols , group = "realkey AS key, COUNT(*) AS count" , "realkey" where = ( ( "day" , day ) , ) if day else ( ) counts_display = counts = db . fetch ( table , cols , where , group , "count DESC" ) if "combos" == table : counts_display = db . fetch ( table , "key, COUNT(*) AS count" , where , "key" , "count DESC" ) events = db . fetch ( table , where = where , order = "stamp" ) for e in events : e [ "dt" ] = datetime . datetime . fromtimestamp ( e [ "stamp" ] ) stats , collatedevents = stats_keyboard ( events , table ) days , input = db . fetch ( "counts" , order = "day" , type = table ) , "keyboard" return bottle . template ( "heatmap.tpl" , locals ( ) , conf = conf ) | Handler for showing the keyboard statistics page . | 229 | 8 |
2,150 | def inputindex ( input ) : stats = { } countminmax = "SUM(count) AS count, MIN(day) AS first, MAX(day) AS last" tables = ( "moves" , "clicks" , "scrolls" ) if "mouse" == input else ( "keys" , "combos" ) for table in tables : stats [ table ] = db . fetchone ( "counts" , countminmax , type = table ) stats [ table ] [ "days" ] = db . fetch ( "counts" , order = "day DESC" , type = table ) return bottle . template ( "input.tpl" , locals ( ) , conf = conf ) | Handler for showing keyboard or mouse page with day and total links . | 152 | 13 |
2,151 | def index ( ) : stats = dict ( ( k , { "count" : 0 } ) for k , tt in conf . InputTables ) countminmax = "SUM(count) AS count, MIN(day) AS first, MAX(day) AS last" for input , table in [ ( x , t ) for x , tt in conf . InputTables for t in tt ] : row = db . fetchone ( "counts" , countminmax , type = table ) if not row [ "count" ] : continue # for input, table stats [ input ] [ "count" ] += row [ "count" ] for func , key in [ ( min , "first" ) , ( max , "last" ) ] : stats [ input ] [ key ] = ( row [ key ] if key not in stats [ input ] else func ( stats [ input ] [ key ] , row [ key ] ) ) return bottle . template ( "index.tpl" , locals ( ) , conf = conf ) | Handler for showing the GUI index page . | 220 | 8 |
2,152 | def stats_keyboard ( events , table ) : if len ( events ) < 2 : return [ ] , [ ] deltas , prev_dt = [ ] , None sessions , session = [ ] , None UNBROKEN_DELTA = datetime . timedelta ( seconds = conf . KeyboardSessionMaxDelta ) blank = collections . defaultdict ( lambda : collections . defaultdict ( int ) ) collated = [ blank . copy ( ) ] # [{dt, keys: {key: count}}] for e in events : if prev_dt : if ( prev_dt . second != e [ "dt" ] . second or prev_dt . minute != e [ "dt" ] . minute or prev_dt . hour != e [ "dt" ] . hour ) : collated . append ( blank . copy ( ) ) delta = e [ "dt" ] - prev_dt deltas . append ( delta ) if delta > UNBROKEN_DELTA : session = None else : if not session : session = [ ] sessions . append ( session ) session . append ( delta ) collated [ - 1 ] [ "dt" ] = e [ "dt" ] collated [ - 1 ] [ "keys" ] [ e [ "realkey" ] ] += 1 prev_dt = e [ "dt" ] longest_session = max ( sessions + [ [ datetime . timedelta ( ) ] ] , key = lambda x : sum ( x , datetime . timedelta ( ) ) ) stats = [ ( "Average interval between combos" , sum ( deltas , datetime . timedelta ( ) ) / len ( deltas ) ) , ] if "combos" == table else [ ( "Keys per hour" , int ( 3600 * len ( events ) / timedelta_seconds ( events [ - 1 ] [ "dt" ] - events [ 0 ] [ "dt" ] ) ) ) , ( "Average interval between keys" , sum ( deltas , datetime . timedelta ( ) ) / len ( deltas ) ) , ( "Typing sessions (key interval < %ss)" % UNBROKEN_DELTA . seconds , len ( sessions ) ) , ( "Average keys in session" , sum ( len ( x ) + 1 for x in sessions ) / len ( sessions ) ) , ( "Average session duration" , sum ( ( sum ( x , datetime . timedelta ( ) ) for x in sessions ) , datetime . timedelta ( ) ) / len ( sessions ) ) , ( "Longest session duration" , sum ( longest_session , datetime . timedelta ( ) ) ) , ( "Keys in longest session" , len ( longest_session ) + 1 ) , ( "Most keys in session" , max ( len ( x ) + 1 for x in sessions ) ) , ] return stats , collated | Return statistics and collated events for keyboard events . | 616 | 10 |
2,153 | def timedelta_seconds ( timedelta ) : return ( timedelta . total_seconds ( ) if hasattr ( timedelta , "total_seconds" ) else timedelta . days * 24 * 3600 + timedelta . seconds + timedelta . microseconds / 1000000. ) | Returns the total timedelta duration in seconds . | 59 | 9 |
2,154 | def init ( ) : global app if app : return app conf . init ( ) , db . init ( conf . DbPath , conf . DbStatements ) bottle . TEMPLATE_PATH . insert ( 0 , conf . TemplatePath ) app = bottle . default_app ( ) bottle . BaseTemplate . defaults . update ( get_url = app . get_url ) return app | Initialize configuration and web application . | 83 | 7 |
2,155 | def start ( ) : global app bottle . run ( app , host = conf . WebHost , port = conf . WebPort , debug = conf . WebAutoReload , reloader = conf . WebAutoReload , quiet = conf . WebQuiet ) | Starts the web server . | 54 | 6 |
2,156 | def download ( url , proxies = None ) : # Handle default argument if proxies is None : proxies = [ "" ] # Loop over all available connections for proxy in proxies : # Handle no proxy case if proxy == "" : socket . socket = DEFAULT_SOCKET # Handle SOCKS proxy elif proxy . startswith ( 'socks' ) : if proxy [ 5 ] == '4' : proxy_type = socks . SOCKS4 else : proxy_type = socks . SOCKS5 proxy = proxy [ proxy . find ( '://' ) + 3 : ] try : proxy , port = proxy . split ( ':' ) except ValueError : port = None socks . set_default_proxy ( proxy_type , proxy , port ) socket . socket = socks . socksocket # Handle generic HTTP proxy else : try : proxy , port = proxy . split ( ':' ) except ValueError : port = None socks . set_default_proxy ( socks . HTTP , proxy , port ) socket . socket = socks . socksocket downloaded = _download_helper ( url ) if downloaded is not None : return downloaded # In case of running out of proxies, return (None, None) return ( None , None ) | Download a PDF or DJVU document from a url eventually using proxies . | 257 | 15 |
2,157 | def make_format ( format_spec ) : fill = '' align = '' zero = '' width = format_spec . width if format_spec . align : align = format_spec . align [ 0 ] if format_spec . fill : fill = format_spec . fill [ 0 ] if format_spec . zero : zero = '0' precision_part = "" if format_spec . precision : precision_part = ".%s" % format_spec . precision # -- FORMAT-SPEC: [[fill]align][0][width][.precision][type] return "%s%s%s%s%s%s" % ( fill , align , zero , width , precision_part , format_spec . type ) | Build format string from a format specification . | 154 | 8 |
2,158 | def extract_fields ( cls , schema ) : # -- BASED-ON: parse.Parser._generate_expression() for part in parse . PARSE_RE . split ( schema ) : if not part or part == '{{' or part == '}}' : continue elif part [ 0 ] == '{' : # this will be a braces-delimited field to handle yield cls . parse ( part ) | Extract fields in a parse expression schema . | 91 | 9 |
2,159 | def _registerHandler ( self , handler ) : self . _logger . addHandler ( handler ) self . _handlers . append ( handler ) | Registers a handler . | 31 | 5 |
2,160 | def _unregisterHandler ( self , handler , shutdown = True ) : if handler in self . _handlers : self . _handlers . remove ( handler ) self . _logger . removeHandler ( handler ) if shutdown : try : handler . close ( ) except KeyError : # Depending on the Python version, it's possible for this call # to fail most likely because some logging module objects get # garbage collected before the VSGLogger object is. pass | Unregisters the logging handler . | 96 | 7 |
2,161 | def getLogger ( cls , name = None ) : return logging . getLogger ( "{0}.{1}" . format ( cls . BASENAME , name ) if name else cls . BASENAME ) | Retrieves the Python native logger | 48 | 7 |
2,162 | def debug ( cls , name , message , * args ) : cls . getLogger ( name ) . debug ( message , * args ) | Convenience function to log a message at the DEBUG level . | 31 | 13 |
2,163 | def info ( cls , name , message , * args ) : cls . getLogger ( name ) . info ( message , * args ) | Convenience function to log a message at the INFO level . | 31 | 13 |
2,164 | def warning ( cls , name , message , * args ) : cls . getLogger ( name ) . warning ( message , * args ) | Convenience function to log a message at the WARNING level . | 31 | 13 |
2,165 | def error ( cls , name , message , * args ) : cls . getLogger ( name ) . error ( message , * args ) | Convenience function to log a message at the ERROR level . | 31 | 13 |
2,166 | def critical ( cls , name , message , * args ) : cls . getLogger ( name ) . critical ( message , * args ) | Convenience function to log a message at the CRITICAL level . | 31 | 15 |
2,167 | def exception ( cls , name , message , * args ) : cls . getLogger ( name ) . exception ( message , * args ) | Convenience function to log a message at the ERROR level with additonal exception information . | 31 | 19 |
2,168 | def allow ( self , ctx , ops ) : auth_info , _ = self . allow_any ( ctx , ops ) return auth_info | Checks that the authorizer s request is authorized to perform all the given operations . Note that allow does not check first party caveats - if there is more than one macaroon that may authorize the request it will choose the first one that does regardless . | 32 | 51 |
2,169 | def allow_any ( self , ctx , ops ) : authed , used = self . _allow_any ( ctx , ops ) return self . _new_auth_info ( used ) , authed | like allow except that it will authorize as many of the operations as possible without requiring any to be authorized . If all the operations succeeded the array will be nil . | 45 | 32 |
2,170 | def allow_capability ( self , ctx , ops ) : nops = 0 for op in ops : if op != LOGIN_OP : nops += 1 if nops == 0 : raise ValueError ( 'no non-login operations required in capability' ) _ , used = self . _allow_any ( ctx , ops ) squasher = _CaveatSquasher ( ) for i , is_used in enumerate ( used ) : if not is_used : continue for cond in self . _conditions [ i ] : squasher . add ( cond ) return squasher . final ( ) | Checks that the user is allowed to perform all the given operations . If not a discharge error will be raised . If allow_capability succeeds it returns a list of first party caveat conditions that must be applied to any macaroon granting capability to execute the operations . Those caveat conditions will not include any declarations contained in login macaroons - the caller must be careful not to mint a macaroon associated with the LOGIN_OP operation unless they add the expected declaration caveat too - in general clients should not create capabilities that grant LOGIN_OP rights . | 129 | 113 |
2,171 | def register ( self , name , path , description , final_words = None ) : return self . __app . recipes . register ( name , path , self . _plugin , description , final_words ) | Registers a new recipe in the context of the current plugin . | 43 | 13 |
2,172 | def get ( self , name = None ) : return self . __app . recipes . get ( name , self . _plugin ) | Gets a list of all recipes which are registered by the current plugin . If a name is provided only the requested recipe is returned or None . | 27 | 29 |
2,173 | def build ( self , recipe ) : return self . __app . recipes . build ( recipe , self . _plugin ) | Builds a recipe | 25 | 4 |
2,174 | def register ( self , name , path , plugin , description = None , final_words = None ) : if name in self . recipes . keys ( ) : raise RecipeExistsException ( "Recipe %s was already registered by %s" % ( name , self . recipes [ "name" ] . plugin . name ) ) self . recipes [ name ] = Recipe ( name , path , plugin , description , final_words ) self . __log . debug ( "Recipe %s registered by %s" % ( name , plugin . name ) ) return self . recipes [ name ] | Registers a new recipe . | 121 | 6 |
2,175 | def unregister ( self , recipe ) : if recipe not in self . recipes . keys ( ) : self . __log . warning ( "Can not unregister recipe %s" % recipe ) else : del ( self . recipes [ recipe ] ) self . __log . debug ( "Recipe %s got unregistered" % recipe ) | Unregisters an existing recipe so that this recipe is no longer available . | 69 | 15 |
2,176 | def get ( self , recipe = None , plugin = None ) : if plugin is not None : if recipe is None : recipes_list = { } for key in self . recipes . keys ( ) : if self . recipes [ key ] . plugin == plugin : recipes_list [ key ] = self . recipes [ key ] return recipes_list else : if recipe in self . recipes . keys ( ) : if self . recipes [ recipe ] . plugin == plugin : return self . recipes [ recipe ] else : return None else : return None else : if recipe is None : return self . recipes else : if recipe in self . recipes . keys ( ) : return self . recipes [ recipe ] else : return None | Get one or more recipes . | 146 | 6 |
2,177 | def build ( self , recipe , plugin = None ) : if recipe not in self . recipes . keys ( ) : raise RecipeMissingException ( "Recipe %s unknown." % recipe ) recipe_obj = self . recipes [ recipe ] if plugin is not None : if recipe_obj . plugin != plugin : raise RecipeWrongPluginException ( "The requested recipe does not belong to the given plugin. Use" "the app object, to retrieve the requested recipe: " "my_app.recipes.get(%s)" % recipe ) recipe_obj . build ( ) | Execute a recipe and creates new folder and files . | 119 | 11 |
2,178 | def build ( self , output_dir = None , * * kwargs ) : if output_dir is None : output_dir = os . getcwd ( ) target = cookiecutter ( self . path , output_dir = output_dir , * * kwargs ) if self . final_words is not None and len ( self . final_words ) > 0 : print ( "" ) print ( self . final_words ) return target | Buildes the recipe and creates needed folder and files . May ask the user for some parameter inputs . | 95 | 20 |
2,179 | def where_am_i ( ) : locations = { 'Work' : 0 , 'Home' : 0 } for ssid in scan_for_ssids ( ) : #print('checking scanned_ssid ', ssid) for l in logged_ssids : #print('checking logged_ssid ', l) if l [ 'name' ] == ssid : locations [ l [ 'location' ] ] += 1 #print('MATCH') print ( 'Where Am I: SSIDS Matching Home = ' , locations [ 'Home' ] , ' SSIDs matching Work = ' , locations [ 'Work' ] ) return max ( locations . keys ( ) , key = lambda k : locations [ k ] ) | high level function that can estimate where user is based on predefined setups . | 152 | 15 |
2,180 | def summarise ( self ) : res = '' if self . user == 'Developer' : if self . host == 'Home PC' : res += 'At Home' else : res += 'Away from PC' elif self . user == 'User' and self . host == 'Home PC' : res += 'Remote desktop into home PC' res += '\n' res += self . transport return res | extrapolate a human readable summary of the contexts | 86 | 10 |
2,181 | def get_host ( self ) : import socket host_name = socket . gethostname ( ) for h in hosts : if h [ 'name' ] == host_name : return h [ 'type' ] , h [ 'name' ] return dict ( type = 'Unknown' , name = host_name ) | returns the host computer running this program | 67 | 8 |
2,182 | def get_user ( self ) : for name in ( 'LOGNAME' , 'USER' , 'LNAME' , 'USERNAME' ) : user = os . environ . get ( name ) if user : break for u in users : if u [ 'name' ] == user : return u [ 'type' ] , u [ 'name' ] | returns the username on this computer | 76 | 7 |
2,183 | def get_host_usage ( self ) : import psutil process_names = [ proc . name for proc in psutil . process_iter ( ) ] cpu_pct = psutil . cpu_percent ( interval = 1 ) mem = psutil . virtual_memory ( ) return str ( cpu_pct ) , str ( len ( process_names ) ) , str ( mem . available ) , str ( mem . total ) | get details of CPU RAM usage of this PC | 91 | 9 |
2,184 | def schema ( ) : return Schema ( { 'script' : And ( Or ( type ( ' ' ) , type ( u' ' ) ) , len ) , Optional ( 'title' , default = '' ) : str , Optional ( 'model' , default = { } ) : { Optional ( And ( str , len ) ) : object } , Optional ( 'env' , default = { } ) : { Optional ( And ( str , len ) ) : And ( str , len ) } , Optional ( 'item' , default = None ) : object , Optional ( 'dry_run' , default = False ) : bool , Optional ( 'debug' , default = False ) : bool , Optional ( 'strict' , default = False ) : bool , Optional ( 'variables' , default = { } ) : { Optional ( And ( Or ( type ( ' ' ) , type ( u' ' ) ) , len , Regex ( r'([a-zA-Z][_a-zA-Z]*)' ) ) ) : Or ( type ( ' ' ) , type ( u' ' ) ) } , Optional ( 'temporary_scripts_path' , default = '' ) : Or ( type ( '' ) , type ( u'' ) ) , Optional ( 'internal' , default = False ) : bool } ) | Provide schema for shell configuration . | 284 | 7 |
2,185 | def get_by_name ( self , name ) : for p in self . project_list : if p . nme == name : return p return None | returns an object Project which matches name | 33 | 8 |
2,186 | def execute_tasks ( self ) : for t in self . tasks : print ( 'RUNNING ' + str ( t . task_id ) + ' = ' + t . name ) t . execute ( ) if t . success != '__IGNORE__RESULT__' : print ( t ) print ( 'TASK RESULT :' , t . result , ' but success = ' , t . success ) if t . result != t . success : #raise Exception('Project execution failed at task ' + str(t.task_id) + ' = ' + t.name) print ( 'ABORTING TASK EXECUTION SEQUENCE' + str ( t . task_id ) + ' = ' + t . name ) break | run execute on all tasks IFF prior task is successful | 162 | 11 |
2,187 | def build_report ( self , op_file , tpe = 'md' ) : if tpe == 'md' : res = self . get_report_md ( ) elif tpe == 'rst' : res = self . get_report_rst ( ) elif tpe == 'html' : res = self . get_report_html ( ) else : res = 'Unknown report type passed to project.build_report' with open ( op_file , 'w' ) as f : f . write ( res ) | create a report showing all project details | 116 | 7 |
2,188 | def get_report_rst ( self ) : res = '' res += '-----------------------------------\n' res += self . nme + '\n' res += '-----------------------------------\n\n' res += self . desc + '\n' res += self . fldr + '\n\n' res += '.. contents:: \n\n\n' res += 'Overview\n' + '===========================================\n\n' res += 'This document contains details on the project ' + self . nme + '\n\n' for d in self . details : res += ' - ' + d [ 0 ] + ' = ' + d [ 1 ] + '\n\n' res += '\nTABLES\n' + '===========================================\n\n' for t in self . datatables : res += t . name + '\n' res += '-------------------------\n\n' res += t . format_rst ( ) + '\n\n' return res | formats the project into a report in RST format | 222 | 11 |
2,189 | def get_report_html ( self ) : res = '<h2>Project:' + self . nme + '</h2>' res += '<p>' + self . desc + '</p>' res += '<p>' + self . fldr + '</p>' res += '<BR><h3>TABLES</h3>' for t in self . datatables : res += '<b>' + t . name + '<b><BR>' res += '<p>' + str ( t ) + '</p>' return res | formats the project into a report in MD format - WARNING - tables missing BR | 132 | 16 |
2,190 | def add_param ( self , param_key , param_val ) : self . params . append ( [ param_key , param_val ] ) if param_key == '__success_test' : self . success = param_val | adds parameters as key value pairs | 51 | 7 |
2,191 | def execute ( self ) : func_params = [ ] exec_str = self . func . __name__ + '(' for p in self . params : if p [ 0 ] [ 0 : 2 ] != '__' : # ignore custom param names exec_str += p [ 0 ] + '="' + self . _force_str ( p [ 1 ] ) + '", ' func_params . append ( p [ 1 ] ) exec_str = exec_str [ : - 2 ] exec_str += ') # task' + str ( self . task_id ) + ': ' + self . name self . result = self . func ( * func_params ) print ( exec_str + ' loaded ' , self . result ) | executes all automatic tasks in order of task id | 158 | 10 |
2,192 | def create_column_index ( annotations ) : _column_index = OrderedDict ( { 'Column Name' : annotations [ 'Column Name' ] } ) categorical_rows = annotation_rows ( 'C:' , annotations ) _column_index . update ( categorical_rows ) numerical_rows = { name : [ float ( x ) if x != '' else float ( 'NaN' ) for x in values ] for name , values in annotation_rows ( 'N:' , annotations ) . items ( ) } # to floats _column_index . update ( numerical_rows ) column_index = pd . MultiIndex . from_tuples ( list ( zip ( * _column_index . values ( ) ) ) , names = list ( _column_index . keys ( ) ) ) if len ( column_index . names ) == 1 : # flatten single-level index name = column_index . names [ 0 ] column_index = column_index . get_level_values ( name ) return column_index | Create a pd . MultiIndex using the column names and any categorical rows . Note that also non - main columns will be assigned a default category . | 219 | 31 |
2,193 | def read_perseus ( path_or_file , * * kwargs ) : annotations = read_annotations ( path_or_file , separator ) column_index = create_column_index ( annotations ) if 'usecols' in kwargs : usecols = kwargs [ 'usecols' ] if type ( usecols [ 0 ] ) is str : usecols = sorted ( [ list ( column_index ) . index ( x ) for x in usecols ] ) column_index = column_index [ usecols ] kwargs [ 'dtype' ] = dict ( kwargs . get ( 'dtype' , { } ) , * * annotations . get ( 'dtype' , { } ) ) kwargs [ 'converters' ] = dict ( kwargs . get ( 'converters' , { } ) , * * annotations . get ( 'converters' , { } ) ) df = pd . read_csv ( path_or_file , sep = separator , comment = '#' , * * kwargs ) df . columns = column_index return df | Read a Perseus - formatted matrix into a pd . DataFrame . Annotation rows will be converted into a multi - index . | 251 | 27 |
2,194 | def to_perseus ( df , path_or_file , main_columns = None , separator = separator , convert_bool_to_category = True , numerical_annotation_rows = set ( [ ] ) ) : _df = df . copy ( ) if not _df . columns . name : _df . columns . name = 'Column Name' column_names = _df . columns . get_level_values ( 'Column Name' ) annotations = { } main_columns = _infer_main_columns ( _df ) if main_columns is None else main_columns annotations [ 'Type' ] = [ 'E' if column_names [ i ] in main_columns else dtype_to_perseus ( dtype ) for i , dtype in enumerate ( _df . dtypes ) ] # detect multi-numeric columns for i , column in enumerate ( _df . columns ) : valid_values = [ value for value in _df [ column ] if value is not None ] if len ( valid_values ) > 0 and all ( type ( value ) is list for value in valid_values ) : annotations [ 'Type' ] [ i ] = 'M' _df [ column ] = _df [ column ] . apply ( lambda xs : ';' . join ( str ( x ) for x in xs ) ) if convert_bool_to_category : for i , column in enumerate ( _df . columns ) : if _df . dtypes [ i ] is np . dtype ( 'bool' ) : values = _df [ column ] . values _df [ column ] [ values ] = '+' _df [ column ] [ ~ values ] = '' annotation_row_names = set ( _df . columns . names ) - { 'Column Name' } for name in annotation_row_names : annotation_type = 'N' if name in numerical_annotation_rows else 'C' annotations [ '{}:{}' . format ( annotation_type , name ) ] = _df . columns . get_level_values ( name ) with PathOrFile ( path_or_file , 'w' ) as f : f . write ( separator . join ( column_names ) + '\n' ) for name , values in annotations . items ( ) : f . write ( '#!{{{name}}}{values}\n' . format ( name = name , values = separator . join ( [ str ( x ) for x in values ] ) ) ) _df . to_csv ( f , header = None , index = False , sep = separator ) | Save pd . DataFrame to Perseus text format . | 569 | 12 |
2,195 | def get_page ( search_text ) : lst = search_aikif ( search_text ) txt = '<table class="as-table as-table-zebra as-table-horizontal">' for result in lst : txt += '<TR><TD>' + result + '</TD></TR>' txt += '</TABLE>\n\n' return txt | formats the entire search result in a table output | 89 | 10 |
2,196 | def search_aikif ( txt , formatHTML = True ) : results = [ ] num_found = 0 import aikif . lib . cls_filelist as mod_fl my_files = mod_fl . FileList ( [ aikif_folder ] , [ '*.*' ] , [ '*.pyc' ] ) files = my_files . get_list ( ) for f in files : try : num_found = 0 with open ( f , 'r' ) as cur : line_num = 0 for line in cur : line_num += 1 if txt in line : num_found += 1 if formatHTML is True : results . append ( format_result ( line , line_num , txt ) ) else : results . append ( [ f , line , line_num , txt ] ) if num_found > 0 : if formatHTML is True : results . append ( '<h3>' + f + ' = ' + str ( num_found ) + ' results</h3>' ) else : print ( f + ' = ' + str ( num_found ) + '' ) except Exception : results . append ( 'problem with file ' + f ) if len ( results ) == 0 : results . append ( "No results" ) return results | search for text - currently this looks in all folders in the root of AIKIF but that also contains binaries so will need to use the agent_filelist . py to specify the list of folders . NOTE - this needs to use indexes rather than full search each time | 277 | 54 |
2,197 | def format_result ( line , line_num , txt ) : return ' ' + str ( line_num ) + ': ' + line . replace ( txt , '<span style="background-color: #FFFF00">' + txt + '</span>' ) | highlight the search result | 69 | 5 |
2,198 | def TEST ( ) : w = World ( 'Mars' , [ 0 , 0.0 , 0.9 , 0.0 ] ) print ( w ) p = Person ( 'Rover' , { 'tax_min' : 0.0 , 'tax_max' : 0.9 , 'tradition' : 0.9 , 'equity' : 0.0 } ) print ( p ) h = Happiness ( p , w ) #h.add_factor(HappinessFactors(name, type, min, max)) h . add_factor ( HappinessFactors ( 'tax' , 'Economic' , 0.1 , 0.3 ) ) h . add_factor ( HappinessFactors ( 'tradition' , 'Personal' , 0.3 , 0.9 ) ) h . add_factor ( HappinessFactors ( 'equity' , 'Personal' , 0.1 , 0.9 ) ) h . add_factor ( HappinessFactors ( 'growth' , 'Economic' , 0.01 , 0.09 ) ) print ( h . show_details ( ) ) | Modules for testing happiness of persons in worlds based on simplistic preferences . Just a toy - dont take seriously | 236 | 21 |
2,199 | def solve ( self , max_worlds = 10000 , silent = False ) : self . num_worlds = 0 num_unhappy = 0 for tax_rate in range ( self . tax_range [ 0 ] , self . tax_range [ 1 ] ) : for equity in range ( self . equity_range [ 0 ] , self . equity_range [ 1 ] ) : for tradition in range ( self . tradition_range [ 0 ] , self . tradition_range [ 1 ] ) : self . num_worlds += 1 if self . num_worlds > max_worlds : break w = World ( str ( self . num_worlds ) . zfill ( 6 ) , [ 5000 , tax_rate / 10 , tradition / 10 , equity / 10 ] ) world_happiness = 0 num_unhappy = 0 for person in self . all_people : wh = Happiness ( person , w ) world_happiness += wh . rating if wh . rating < 0 : num_unhappy += 1 if world_happiness > self . net_happiness : self . net_happiness = world_happiness self . unhappy_people = num_unhappy if not silent : print ( 'found better world - ' + w . nme + ' = ' + str ( world_happiness ) + ' - total unhappy_people = ' + str ( self . unhappy_people ) ) | find the best world to make people happy | 297 | 8 |