idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
14,700
def create_bmi_model(self, engine, bmi_class=None, wrapper_kwargs=None):
    """Initialize a BMI model wrapper for *engine*, using an optional class.

    Parameters
    ----------
    engine :
        Engine passed to the wrapper as its first positional argument
        (most wrappers accept it; old python engines do not).
    bmi_class : str, optional
        Dotted path of a wrapper class to import; defaults to
        ``bmi.wrapper.BMIWrapper``.
    wrapper_kwargs : dict, optional
        Extra keyword arguments for the wrapper constructor.

    Returns
    -------
    The constructed model wrapper instance.
    """
    if wrapper_kwargs is None:
        wrapper_kwargs = {}
    if bmi_class is None:
        wrapper_class = bmi.wrapper.BMIWrapper
    else:
        wrapper_class = self.import_from_string(bmi_class)
    try:
        # Most models use engine as a first argument.
        model = wrapper_class(engine, **wrapper_kwargs)
    except TypeError:
        # Old python engines *are* engines, so they don't accept one
        # (but they should).  logger.warn is deprecated -> logger.warning.
        logger.warning(
            'Model wrapper %s does not accept engine as a first argument',
            wrapper_class)
        model = wrapper_class(**wrapper_kwargs)
    return model
initialize a bmi model using an optional class
169
10
14,701
def register(self):
    """Register this model at the tracking server."""
    payload = json.dumps(self.metadata)
    # connect to tracker
    result = requests.post(urljoin(self.tracker, 'models'), data=payload)
    logger.debug("registered at server %s: %s", self.tracker, result)
    # Remember the tracker's reply (contains e.g. the assigned uuid).
    self.metadata["tracker"] = result.json()
register model at tracking server
75
5
14,702
def unregister(self):
    """Remove this model's registration from the tracking server."""
    uuid = self.metadata["tracker"]["uuid"]
    # connect to server
    endpoint = urljoin(self.tracker, 'models' + "/" + uuid)
    result = requests.delete(endpoint)
    logger.debug("unregistered at server %s with %s: %s",
                 self.tracker, uuid, result)
unregister model at tracking server
81
6
14,703
def create_sockets(self):
    """Create the zmq REP/PULL/PUB sockets plus a poller for this server.

    Port numbers are read from ``self.ports`` when present; otherwise a
    random port is bound and written back into ``self.ports``.  Naming note
    (from the original): ports are named after the *client* side
    (REQ/PUSH/SUB) even though the sockets created here are the server side
    (REP/PULL/PUB).
    """
    ports = self.ports
    context = zmq.Context()
    poller = zmq.Poller()

    def _bind(socket, key):
        # Bind to the configured port, or pick a random one and record it.
        if key in ports:
            socket.bind("tcp://*:{port}".format(port=ports[key]))
        else:
            ports[key] = socket.bind_to_random_port("tcp://*")

    # Socket to handle init data (answers client REQs).
    rep = context.socket(zmq.REP)
    _bind(rep, "REQ")
    # Socket receiving pushed data from clients.
    pull = context.socket(zmq.PULL)
    _bind(pull, "PUSH")
    # Socket for sending model messages to subscribers.
    pub = context.socket(zmq.PUB)
    _bind(pub, "SUB")

    poller.register(rep, zmq.POLLIN)
    poller.register(pull, zmq.POLLIN)
    return dict(poller=poller, rep=rep, pull=pull, pub=pub)
create zmq sockets
374
5
14,704
def run(self):
    """Run the model: initialize, service requests, step, publish output."""
    model = self.model
    sockets = self.sockets

    model.initialize(self.configfile)
    if model.state == 'pause':
        logger.info("model initialized and started in pause mode, waiting for requests")
    else:
        logger.info("model started and initialized, running")

    if self.tracker:
        self.register()
        atexit.register(self.unregister)

    self.process_incoming()

    logger.info("Entering timeloop...")
    # Keep on counting indefinitely.
    for step in itertools.count():
        # While paused, keep waiting for messages; process_incoming is
        # expected to flip model.state back to play.  The original used
        # while/else here: since the loop has no break, the else branch
        # always ran, so an unconditional call afterwards is equivalent
        # (process messages once and continue).
        while model.state == "pause":
            self.process_incoming()
        self.process_incoming()

        if model.state == "quit":
            break

        # lookup dt or use -1 (default)
        model.update(model.get_time_step() or -1)

        # check counter, if not a multiple of interval, skip this step
        if step % self.interval:
            continue
        for key in self.output_vars:
            value = model.get_var(key)
            metadata = {'name': key, 'iteration': step}
            # 4ms for 1M doubles
            logger.debug("sending {}".format(metadata))
            if 'pub' in sockets:
                send_array(sockets['pub'], value, metadata=metadata)

    logger.info("Finalizing...")
    model.finalize()
run the model
349
3
14,705
def calc_basics(width=-1, length=-1, height=2.4, prevailing_wind=2.8):
    """Calculate basic areas of a rectangular structure.

    Prompts interactively for width/length when they are left at the
    sentinel value -1.  Returns a dict with floor area, perimeter and
    roof/wall cladding areas (pretty-printed as a side effect).
    """
    if width == -1:
        width = int(input('enter building width : '))
    if length == -1:
        length = int(input('enter building length : '))
    floor_area = width * length
    perimeter = 2 * width + 2 * length
    res = {
        'area': floor_area,
        'perim': perimeter,
        'roof_cladding': floor_area,
        'wall_cladding': perimeter * height,
    }
    pprint(res)
    return res
calculate various aspects of the structure
143
8
14,706
def bld_rafter_deflection(length=-9, force=-9, E_mod_elasticity=-9, I_moment_of_intertia=-9):
    """Calculate maximum rafter deflections for centre and distributed loads.

    Prompts interactively for any argument left at the sentinel value -9.
    E is entered in units of 1e5 and I in units of 1e6 (sample values from
    the Structures II course; see test_calc_building_design.py).
    """
    if length == -9:
        length = float(input('enter rafter length : '))
    if force == -9:
        force = float(input('enter Force or weight applied to roof : '))
    if E_mod_elasticity == -9:
        E_mod_elasticity = float(input('enter modulus of elasticity x10**5 (Steel beam example=2.1) : '))
    if I_moment_of_intertia == -9:
        I_moment_of_intertia = float(input('enter members "moment of intertia x10**6" (for Steel beam 410UB53.7=188 ) :'))
    res = {
        'max deflection - centre load':
            (1 * force * (length ** 3)) / (48 * (E_mod_elasticity * 10 ** 5) * (I_moment_of_intertia * 10 ** 6)),
        'max deflection - distrib load':
            (5 * force * (length ** 4)) / (384 * (E_mod_elasticity * 10 ** 5) * (I_moment_of_intertia * 10 ** 6)),
    }
    pprint(res)
    return res
calculate rafter deflections - see test_calc_building_design . py for Sample values for equations below from Structures II course
328
30
14,707
def get_parties(self, obj):
    """Serialize every Party (the *obj* argument is not used)."""
    queryset = Party.objects.all()
    serializer = PartySerializer(queryset, many=True)
    return serializer.data
All parties .
30
3
14,708
def get_elections(self, obj):
    """Serialize all elections for body *obj* on the context's election day."""
    election_day = ElectionDay.objects.get(date=self.context["election_date"])
    filters = {
        "race__office__body": obj,
        "election_day": election_day,
    }
    division = self.context.get("division")
    # Senate races filter by the division itself; house races by the
    # division's parent (districts nest under a parent division).
    if division and obj.slug == "senate":
        filters["division"] = division
    elif division and obj.slug == "house":
        filters["division__parent"] = division
    # Without a division, exclude special house elections.
    if obj.slug == "house" and not division:
        filters["race__special"] = False
    elections = Election.objects.filter(**filters)
    return ElectionSerializer(elections, many=True).data
All elections held on an election day .
198
8
14,709
def parse_channels(self):
    """Create a Channel object for every channel in the project dict."""
    return [
        Channel(channel, self._is_sixteen_bit, self._ignore_list)
        for channel in self._project_dict["channels"]
    ]
Creates an array of Channel objects from the project
57
10
14,710
def update(self):
    """Refresh each channel, then write its dict back into the project dict.

    Channels are matched to project-dict entries by common.ALLTYPES_NAME.
    """
    for channel in self.channels:
        channel.update()
    project_channels = self._project_dict["channels"]
    for index, channel_dict in enumerate(project_channels):
        for channel in self.channels:
            if channel.name == channel_dict["common.ALLTYPES_NAME"]:
                project_channels[index] = channel.as_dict()
Updates the dictionary of the project
110
7
14,711
def cmd(send, *_):
    """Send the name of a random distro (scraped from distrowatch's redirect)."""
    url = get('http://distrowatch.com/random.php').url
    match = re.search('=(.*)', url)
    send(match.group(1) if match else "no distro found")
Gets a random distro .
65
7
14,712
def cmd(send, *_):
    """Send each line of the Discordian date (from the ddate binary).

    NOTE(review): a missing ddate binary raises FileNotFoundError, which is
    not caught here — only CalledProcessError is handled.
    """
    try:
        output = subprocess.check_output(['ddate'], universal_newlines=True)
    except subprocess.CalledProcessError:
        output = 'Today is the day you install ddate!'
    for line in output.splitlines():
        send(line)
Returns the Discordian date .
68
6
14,713
def cmd(send, msg, args):
    """Set a channel mode, optionally on another channel via --chan.

    Requires the bot to be present and opped in the target channel; mode
    changes are echoed to the control channel unless issued from there.
    """
    parser = arguments.ArgParser(args['config'])
    parser.add_argument('--chan', '--channel', action=arguments.ChanParser)
    try:
        cmdargs, extra = parser.parse_known_args(msg)
    except arguments.ArgumentException as e:
        send(str(e))
        return
    target = cmdargs.channels[0] if hasattr(cmdargs, 'channels') else args['target']
    mode = " ".join(extra)
    if not mode:
        send('Please specify a mode.')
        return
    if target == 'private':
        send("Modes don't work in a PM!")
        return
    with args['handler'].data_lock:
        if target not in args['handler'].channels:
            send("Bot not in channel %s" % target)
        elif args['botnick'] not in args['handler'].opers[target]:
            send("Bot must be opped in channel %s" % target)
        else:
            args['handler'].connection.mode(target, mode)
            if args['target'] != args['config']['core']['ctrlchan']:
                send("Mode \"%s\" on %s by %s" % (mode, target, args['nick']),
                     target=args['config']['core']['ctrlchan'])
Sets a mode .
323
5
14,714
def as_json(context):
    """Return a JSON Response describing *context*'s wrapped object.

    The pretty-printed repr is HTML-escaped since it may be rendered in a
    page.
    """
    # cgi.escape was removed in Python 3.8; html.escape(..., quote=False)
    # is the drop-in replacement (same default: quotes not escaped).
    import html
    info = {
        'info': html.escape(pprint.pformat(context.context), quote=False),
    }
    return Response(content_type='application/json', body=json.dumps(info))
Return an object s representation as JSON
55
7
14,715
def as_tree(context):
    """Return member info for *context* as a JSON Response (always a list)."""
    tree = _build_tree(context, 2, 1)
    # Normalize a single dict node to a one-element list.  Uses isinstance
    # instead of the original `type(tree) == dict` so dict subclasses are
    # wrapped too (the idiomatic type check).
    if isinstance(tree, dict):
        tree = [tree]
    return Response(content_type='application/json', body=json.dumps(tree))
Return info about an object s members as JSON
56
9
14,716
def main():
    """Prototype RPG simulation: build two characters and battle them.

    Demonstrates how an RPG simulation might be used in the AIKIF framework.
    """
    hero = Character('Albogh', str=4, int=7, sta=50)
    rival = Character('Zoltor', str=6, int=6, sta=70)
    print('PLAYER1 [start]:', hero)
    print('PLAYER2 [start]:', rival)
    battle = Battle(hero, rival)
    print(battle)
    print('PLAYER1 [end]:', hero)
    print('PLAYER2 [end]:', rival)
Prototype to see how an RPG simulation might be used in the AIKIF framework . The idea is to build a simple character and run a simulation to see how it succeeds in a random world against another players character character stats world locations
123
47
14,717
def fight(self, moves=10):
    """Run rounds of combat until someone dies or the rounds run out.

    NOTE(review): range(1, moves) runs moves-1 rounds — confirm whether the
    off-by-one is intended.  As in the original, the *acting* character's
    stamina is reduced by the move's damage.
    """
    def _round(actor, other):
        # Execute one move; return True when the actor has died.
        result, dmg = self.calc_move(actor, other)
        print(actor.name + ' ' + result + ' for ' + str(dmg))
        actor.sta = actor.sta - dmg
        if self.is_character_dead(actor):
            print(actor.name + ' has died')
            return True
        return False

    for _ in range(1, moves):
        # player 1
        if _round(self.c1, self.c2):
            return
        # player 2
        if _round(self.c2, self.c1):
            return
runs a series of fights
206
5
14,718
def duration(self):
    """Return a timedelta for this build.

    Completed builds (truthy completion_ts) measure start to completion;
    in-flight builds measure start to now (UTC).
    """
    end = self.completed if self.completion_ts else datetime.utcnow()
    return end - self.started
Return a timedelta for this build .
36
8
14,719
def estimate_completion(self):
    """Estimate (or report) completion time for this build.

    Builds past the BUILDING state return their exact completion time;
    in-progress builds return start time plus the average historical
    duration for builds of this name.  (Twisted inlineCallbacks style.)
    """
    if self.state != build_states.BUILDING:
        # Build is already complete; report the exact completion time.
        defer.returnValue(self.completed)
    avg_delta = yield self.connection.getAverageBuildDuration(self.name)
    defer.returnValue(self.started + avg_delta)
Estimate completion time for a build .
85
8
14,720
def target(self):
    """Find the target name for this build, or None when it has no task."""
    build_task = yield self.task()
    if not build_task:
        # No task: resolve to None (extra yield kept from the original flow).
        yield defer.succeed(None)
        defer.returnValue(None)
    defer.returnValue(build_task.target)
Find the target name for this build .
41
8
14,721
def task(self):
    """Return deferred task info for this build, or a deferred None.

    With no .task_id this is a no-op that resolves to None.
    """
    if not self.task_id:
        return defer.succeed(None)
    return self.connection.getTaskInfo(self.task_id)
Find the task for this build .
56
7
14,722
def task_id(self):
    """Return a task ID for a build, including container CG builds.

    Hack: falls back to extra['container_koji_task_id'] when the build
    record itself has no task_id; returns None when neither is present.
    """
    direct_id = self['task_id']
    if direct_id:
        return direct_id
    extra = self.extra
    if extra and 'container_koji_task_id' in extra:
        return extra['container_koji_task_id']
Hack to return a task ID for a build including container CG builds .
63
14
14,723
def get_images(self, obj):
    """Map each image's tag name (stringified) to its image URL."""
    images = obj.images.all()
    return {str(entry.tag): entry.image.url for entry in images}
Object of images serialized by tag name .
35
9
14,724
def get_override_winner(self, obj):
    """Return the backend-marked winner flag for *obj*'s division.

    District-level elections roll up to the parent division; returns False
    when no matching vote exists.
    """
    division = obj.election.division
    if division.level.name == DivisionLevel.DISTRICT:
        division = division.parent
    vote = obj.votes.filter(division=division).first()
    return vote.winning if vote else False
Winner marked in backend .
71
5
14,725
def get_override_votes(self, obj):
    """Return serialized backend votes, or False.

    Only used when meta.override_ap_votes is set.  TODO: REVISIT THIS
    """
    if hasattr(obj, "meta") and obj.meta.override_ap_votes:
        all_votes = None
        # Union the vote querysets across all candidate elections.
        for candidate_election in obj.candidate_elections.all():
            if all_votes:
                all_votes = all_votes | candidate_election.votes.all()
            else:
                all_votes = candidate_election.votes.all()
        return VotesSerializer(all_votes, many=True).data
    return False
Votes entered into backend . Only used if override_ap_votes = True .
112
17
14,726
def save(self, *args, **kwargs):
    """Populate slide fields from the linked publication before saving.

    Fills empty title/subtitle/description from the publication and, for
    new slides that would otherwise publish immediately, backdates
    publish_datetime to the publication's year/month/day.
    """
    if self.publication:
        publication = self.publication
        if not self.title:
            self.title = publication.title
        if not self.subtitle:
            first_author = publication.first_author
            # Single-author papers list the author; otherwise "X et al."
            if first_author == publication.last_author:
                authors = first_author
            else:
                authors = '{} et al.'.format(first_author)
            self.subtitle = '{}, {} ({})'.format(
                authors, publication.journal, publication.year)
        if not self.description:
            self.description = publication.abstract
        if self.publication.year and not self.pk:
            delta = timezone.now() - self.publish_datetime
            # Backdate only slides scheduled for "now" (same day, not future).
            if self.publish_datetime <= timezone.now() and delta.days == 0:
                self.publish_datetime = datetime.datetime(
                    year=int(self.publication.year),
                    month=int(self.publication.month or 1),
                    day=int(self.publication.day or 1),
                )
    super().save(*args, **kwargs)
Before saving if slide is for a publication use publication info for slide s title subtitle description .
238
18
14,727
def execute(self, context=None, stdout=None, stderr=None):
    """Does all the work of running an example.

    Sets up the context, redirects stdout/stderr, runs setup/execute/
    teardown under a benchmark, records any exception on the example, and
    returns True when the example ran without error.
    """
    total_benchmark = Benchmark()
    self.context = context or Context()
    if self._is_collection():
        # Collections keep the real streams (nested examples capture).
        self.stdout = sys.stdout
        self.stderr = sys.stderr
    else:
        self.stdout = stdout or StringIO()
        self.stderr = stderr or StringIO()
    self._record_start_example(self.formatter)
    try:
        with total_benchmark, Replace(sys, 'stdout', self.stdout), Replace(sys, 'stderr', self.stderr):
            self._setup()
            self._execute()
            self._teardown()
    except Exception as e:
        self.example.error = e
        self.example.traceback = sys.exc_info()[2]  #traceback.format_exc()
    finally:
        # Always record timing/end-of-example, even on failure.
        self.example.real_time = total_benchmark.total_time
        self._record_end_example(self.formatter)
        self.context = None
    self.example.stdout = self.stdout
    self.example.stderr = self.stderr
    return self.example.error is None
Does all the work of running an example .
277
9
14,728
def run(self, context=None, stdout=None, stderr=None):
    """Like execute, but records a skip when should_skip() is true.

    Returns the (successes, failures, skipped) counters.
    """
    if not self.should_skip():
        self.execute(context, stdout, stderr)
    else:
        self._record_skipped_example(self.formatter)
        self.num_skipped += 1
    return self.num_successes, self.num_failures, self.num_skipped
Like execute but records a skip if the should_skip method returns True .
89
15
14,729
def _setup(self):
    """Reset example state and prepare the context before running."""
    self.example.error = None
    self.example.traceback = ''
    # inject function contexts from parent functions
    #for parent in reversed(self.example.parents):
    #    c._update_properties(locals_from_function(parent))
    self.context = Context(parent=self.context)
    if self.is_root_runner:
        run.before_all.execute(self.context)
    self.example.before(self.context)
Resets the state and prepares for running the example .
103
11
14,730
def _execute_example_group(self):
    """Run each child example with a child runner and accumulate counters."""
    for child in self.example:
        child_runner = self.__class__(child, self.formatter)
        child_runner.is_root_runner = False
        successes, failures, skipped = child_runner.run(self.context)
        self.num_successes += successes
        self.num_failures += failures
        self.num_skipped += skipped
Handles the execution of Example Group
80
7
14,731
def _execute_example(self):
    """Run the example's test function under a benchmark.

    KeyboardInterrupt bubbles up to cancel the whole spec run; any other
    exception is re-raised for the caller (execute) to record on the
    example.  NOTE(review): the original had an unreachable
    ``self.num_failures += 1`` after the bare ``raise`` — dead code removed
    here; confirm whether failures were ever meant to be counted locally.
    """
    test_benchmark = Benchmark()
    try:
        with Registry(), test_benchmark:
            if accepts_arg(self.example.testfn):
                self.example.testfn(self.context)
            else:
                self.context.inject_into_self(self.example.testfn)
                self.example.testfn()
        self.num_successes += 1
    except KeyboardInterrupt:
        # bubble interrupt for canceling spec execution
        raise
    except:
        raise
    finally:
        self.example.user_time = test_benchmark.total_time
Handles the execution of the Example
132
7
14,732
def _teardown(self):
    """Run after-hooks and mark the example as having run."""
    self.example.after(self.context)
    if self.is_root_runner:
        run.after_all.execute(self.context)
    #self.context = self.context._parent
    self.has_ran = True
Handles the restoration of any potential global state set .
57
11
14,733
def pypy_json_encode(value, pretty=False):
    """pypy DOES NOT OPTIMIZE GENERATOR CODE WELL.

    Encodes *value* to JSON via a UnicodeBuilder buffer; falls back to
    pretty_json on any serialization problem.  The module-level
    _dealing_with_problem flag prevents infinite recursion when the
    fallback itself fails.
    """
    global _dealing_with_problem
    if pretty:
        return pretty_json(value)
    try:
        _buffer = UnicodeBuilder(2048)
        _value2json(value, _buffer)
        output = _buffer.build()
        return output
    except Exception as e:
        # THE PRETTY JSON WILL PROVIDE MORE DETAIL ABOUT THE SERIALIZATION CONCERNS
        from mo_logs import Log
        if _dealing_with_problem:
            Log.error("Serialization of JSON problems", e)
        else:
            Log.warning("Serialization of JSON problems", e)
        _dealing_with_problem = True
        try:
            return pretty_json(value)
        except Exception as f:
            Log.error("problem serializing object", f)
        finally:
            _dealing_with_problem = False
pypy DOES NOT OPTIMIZE GENERATOR CODE WELL
185
14
14,734
def problem_serializing(value, e=None):
    """THROW ERROR ABOUT SERIALIZING.

    Reports (via Log.error) what could not be converted to JSON, including
    the value's repr when it can be computed.
    """
    from mo_logs import Log

    try:
        typename = type(value).__name__
    except Exception:
        typename = "<error getting name>"

    try:
        rep = text_type(repr(value))
    except Exception:
        rep = None

    # Bugfix: was `rep == None`; identity is the correct None test.
    if rep is None:
        Log.error(
            "Problem turning value of type {{type}} to json",
            type=typename,
            cause=e
        )
    else:
        Log.error(
            "Problem turning value ({{value}}) of type {{type}} to json",
            value=rep,
            type=typename,
            cause=e
        )
THROW ERROR ABOUT SERIALIZING
133
9
14,735
def unicode_key(key):
    """CONVERT PROPERTY VALUE TO QUOTED NAME OF SAME.

    Non-string keys are reported as errors via Log.error.
    """
    valid = isinstance(key, (text_type, binary_type))
    if not valid:
        from mo_logs import Log
        Log.error("{{key|quote}} is not a valid key", key=key)
    return quote(text_type(key))
CONVERT PROPERTY VALUE TO QUOTED NAME OF SAME
65
15
14,736
def cmd(send, _, args):
    """Show how long the bot has been up and the system load average."""
    now = datetime.now()
    uptime = args['handler'].uptime
    load_avg = ', '.join(str(x) for x in os.getloadavg())
    since_start = now - uptime['start']
    since_reload = now - uptime['reloaded']
    send("Time since start: %s, load average: %s" % (since_start, load_avg))
    send("Time since reload: %s" % since_reload)
Shows the bot s uptime .
127
8
14,737
def pcwd(func):
    """Decorator form of the PreserveWorkingDirectory context manager.

    The wrapped callable runs with the working directory preserved across
    the call.
    """
    @wraps(func)
    def wrapper(*args, **kw):
        with PreserveWorkingDirectory():
            return func(*args, **kw)
    return wrapper
A decorator to provide the functionality of the PreserveWorkingDirectory context manager for functions and methods .
45
20
14,738
def cmd(send, msg, _):
    """Send the SHA-512 hex digest of the message (UTF-8 encoded)."""
    digest = hashlib.sha512(msg.encode('utf-8')).hexdigest()
    send(digest)
SHA512 hashes something .
40
5
14,739
def get_queryset(self):
    """Queryset of executive (body-less) offices with an election on a date."""
    try:
        election_day = ElectionDay.objects.get(date=self.kwargs['date'])
    except Exception:
        raise APIException('No elections on {}.'.format(self.kwargs['date']))
    # Executive offices are those without a legislative body.
    office_ids = [
        election.race.office.uid
        for election in election_day.elections.all()
        if not election.race.office.body
    ]
    return Office.objects.filter(uid__in=office_ids)
Returns a queryset of all executive offices holding an election on a date .
119
16
14,740
def cmd(send, msg, args):
    """Slap somebody, optionally "with <implement>" and/or "for <reason>".

    With no message, slaps a random user in the channel.  Otherwise parses
    a crude grammar: <nick...> [for <reason>] [with <implement>] in either
    order.  Fixes the original's "Invalid Synatx" typo and renames the
    local that shadowed the *args* parameter.
    """
    implements = ['the golden gate bridge', 'a large trout', 'a clue-by-four',
                  'a fresh haddock', 'moon', 'an Itanium', 'fwilson', 'a wombat']
    methods = ['around a bit', 'upside the head']
    if not msg:
        channel = args['target'] if args['target'] != 'private' else args['config']['core']['channel']
        with args['handler'].data_lock:
            users = list(args['handler'].channels[channel].users())
        slap = 'slaps %s %s with %s'
        send(slap % (choice(users), choice(methods), choice(implements)), 'action')
        return
    reason = ''
    method = choice(methods)
    implement = ''
    words = msg.split()
    slapee = words[0]
    # Basic and stupid NLP!  clause_seen tracks whether a for/with clause
    # started; after that, bare words no longer extend the nick.
    i = 1
    clause_seen = False
    while i < len(words):
        if words[i] == 'for':
            clause_seen = True
            if reason:
                send("Invalid Syntax: You can only have one for clause!")
                return
            i += 1
            while i < len(words):
                if words[i] == 'with':
                    break
                reason += " "
                reason += words[i]
                i += 1
            reason = reason.strip()
        elif words[i] == 'with':
            clause_seen = True
            if implement:
                # Typo fixed: was "Invalid Synatx".
                send("Invalid Syntax: You can only have one with clause!")
                return
            i += 1
            while i < len(words):
                if words[i] == 'for':
                    break
                implement += words[i]
                implement += ' '
                i += 1
            implement = implement.strip()
        elif not clause_seen:
            slapee += ' ' + words[i]
            i += 1
    if not implement:
        implement = choice(implements)
    if reason:
        slap = 'slaps %s %s with %s for %s' % (slapee, method, implement, reason)
    else:
        slap = 'slaps %s %s with %s' % (slapee, method, implement)
    send(slap, 'action')
Slap somebody .
486
4
14,741
def cmd(send, msg, args):
    """Send the chain of previous nicks for a user (random user if none given)."""
    if not msg:
        with args['handler'].data_lock:
            if args['target'] != 'private':
                users = list(args['handler'].channels[args['target']].users())
            else:
                users = [args['nick']]
        msg = choice(users)
    chain = get_chain(args['db'], msg)
    if chain:
        send(" -> ".join(chain))
    else:
        send("%s has never changed their nick." % msg)
Gets previous nicks .
121
6
14,742
def clear_database(engine: Connectable, schemas: Iterable[str] = ()) -> None:
    """Clear any tables from an existing database.

    Reflects the default schema plus each named schema to find tables,
    views and constraints, then drops everything found.
    """
    assert check_argument_types()
    all_schemas = (None,) + tuple(schemas)  # type: Tuple[Optional[str], ...]
    metadatas = []
    for schema in all_schemas:
        # Reflect the schema to get the list of the tables, views and constraints
        metadata = MetaData()
        metadata.reflect(engine, schema=schema, views=True)
        metadatas.append(metadata)
    for metadata in metadatas:
        metadata.drop_all(engine, checkfirst=False)
Clear any tables from an existing database .
147
8
14,743
def cmd(send, msg, args):
    """Send the value of a handler attribute, for inspection."""
    handler = args['handler']
    if hasattr(handler, msg):
        send(str(getattr(handler, msg)))
    else:
        send("That attribute was not found in the handler.")
Inspects a bot attribute .
57
7
14,744
def cmd(send, msg, args):
    """Corrects a previous message using sed-style s/…/…/ syntax.

    The first character of *msg* is the delimiter; the replacement runs in
    a worker pool with a 5 second timeout to guard against pathological
    regexes.
    """
    if not msg:
        send("Invalid Syntax.")
        return
    char = msg[0]
    # Split on the (unescaped) delimiter, then unescape escaped slashes.
    msg = [x.replace(r'\/', '/') for x in re.split(r'(?<!\\)\%s' % char, msg[1:], maxsplit=2)]
    # fix for people who forget a trailing slash
    if len(msg) == 2 and args['config']['feature'].getboolean('lazyregex'):
        msg.append('')
    # not a valid sed statement.
    if not msg or len(msg) < 3:
        send("Invalid Syntax.")
        return
    if args['type'] == 'privmsg':
        send("Don't worry, %s is not a grammar Nazi." % args['botnick'])
        return
    string = msg[0]
    replacement = msg[1]
    modifiers = get_modifiers(msg[2], args['nick'], args['config']['core']['nickregex'])
    if modifiers is None:
        send("Invalid modifiers.")
        return
    try:
        regex = re.compile(string, re.IGNORECASE) if modifiers['ignorecase'] else re.compile(string)
        log = get_log(args['db'], args['target'], modifiers['nick'])
        workers = args['handler'].workers
        result = workers.run_pool(do_replace, [log, args['config']['core'], char, regex, replacement])
        try:
            # 5 second timeout; a hung regex forces a pool restart.
            msg = result.get(5)
        except multiprocessing.TimeoutError:
            workers.restart_pool()
            send("Sed regex timed out.")
            return
        if msg:
            send(msg)
        else:
            send("No match found.")
    except sre_constants.error as ex:
        raise CommandFailedException(ex)
Corrects a previous message .
429
6
14,745
def resize(self, size):
    """Re-size the Stream field by appending zero bytes or truncating the end.

    Negative sizes are clamped to zero; bit size and byte alignment are
    updated to match the new length.
    """
    delta = max(int(size), 0) - len(self)
    if delta == 0:
        pass
    elif -delta == len(self):
        # Shrinking to nothing: drop the whole value.
        self._value = bytes()
    elif delta > 0:
        self._value += b'\x00' * delta
    else:
        self._value = self._value[:delta]
    new_size = len(self)
    self._bit_size = new_size * 8
    self._align_to_byte_size = new_size
Re - sizes the Stream field by appending zero bytes or removing bytes from the end .
110
18
14,746
def value(self):
    """Field value as an ascii-decoded string, truncated at the first NUL."""
    terminator = self._value.find(b'\x00')
    if terminator < 0:
        return self._value.decode('ascii')
    return self._value[:terminator].decode('ascii')
Field value as an ascii encoded string .
61
10
14,747
def _set_alignment(self, group_size, bit_offset=0, auto_align=False):
    """Sets the alignment of the Decimal field.

    With auto_align, the alignment byte size is derived from the bit
    offset (rounded up to whole bytes, minimum 1); otherwise group_size
    is used.  Raises FieldAlignmentError for invalid sizes or offsets.
    """
    # Field alignment offset
    field_offset = int(bit_offset)
    # Auto alignment
    if auto_align:
        # Field alignment size, rounding up partial bytes.
        field_size, remainder = divmod(field_offset, 8)
        # Bugfix: was `remainder is not 0` — an identity comparison on an
        # int that only worked via CPython's small-int caching.
        if remainder != 0:
            field_size += 1
        field_size = max(field_size, 1)
    # No auto alignment
    else:
        # Field alignment size
        field_size = int(group_size)
    # Field alignment
    alignment = Alignment(field_size, field_offset)
    # Invalid field alignment size
    # NOTE(review): range(1, 8) excludes 8 — confirm whether an 8-byte
    # alignment should be accepted here.
    if field_size not in range(1, 8):
        raise FieldAlignmentError(self, self.index, alignment)
    # Invalid field alignment offset
    if not (0 <= field_offset <= 63):
        raise FieldAlignmentError(self, self.index, alignment)
    # Invalid field alignment
    if field_offset >= field_size * 8:
        raise FieldAlignmentError(self, self.index, alignment)
    # Set field alignment
    self._align_to_byte_size = alignment.byte_size
    self._align_to_bit_offset = alignment.bit_offset
Sets the alignment of the Decimal field .
258
10
14,748
def value(self):
    """Field value as an enum member name; falls back to the raw integer."""
    if self._enum and issubclass(self._enum, Enumeration):
        name = self._enum.get_name(self._value)
        if name:
            return name
    return self._value
Field value as an enum name string . Fall back is an unsigned integer number .
52
16
14,749
def to_dict(self):
    """Returns a dictionary representation of the dataset.

    Extra keyword arguments stored on the dataset are merged in.
    """
    representation = dict(
        doses=self.doses,
        ns=self.ns,
        incidences=self.incidences,
    )
    representation.update(self.kwargs)
    return representation
Returns a dictionary representation of the dataset .
44
8
14,750
def _calculate_plotting ( n , incidence ) : p = incidence / float ( n ) z = stats . norm . ppf ( 0.975 ) q = 1. - p ll = ( ( 2 * n * p + 2 * z - 1 ) - z * np . sqrt ( 2 * z - ( 2 + 1 / n ) + 4 * p * ( n * q + 1 ) ) ) / ( 2 * ( n + 2 * z ) ) ul = ( ( 2 * n * p + 2 * z + 1 ) + z * np . sqrt ( 2 * z + ( 2 + 1 / n ) + 4 * p * ( n * q - 1 ) ) ) / ( 2 * ( n + 2 * z ) ) return p , ll , ul
Add confidence intervals to dichotomous datasets . From bmds231_manual . pdf pg 124 - 5 .
169
24
14,751
def cd(path, on=os):
    """Change the current working directory within this context.

    Preserves the previous working directory; *on* only needs
    getcwd/chdir, so remote connection objects offering that interface
    work too.  Note: the previous directory is restored only on normal
    exit (no try/finally, matching the original contract).
    """
    previous = on.getcwd()
    on.chdir(path)
    yield
    on.chdir(previous)
Change the current working directory within this context . Preserves the previous working directory and can be applied to remote connections that offer
34
24
14,752
def url_builder(self, endpoint, *, root=None, params=None, url_params=None):
    """Create a URL for the specified endpoint.

    *root* defaults to self.ROOT; *url_params* become the query string and
    *params* fill str.format placeholders in the assembled URL.
    """
    base = self.ROOT if root is None else root
    query = '?' + urlencode(url_params) if url_params else ''
    template = base + endpoint + query
    return template.format(**params or {})
Create a URL for the specified endpoint .
77
8
14,753
def from_env(cls):
    """Create a service instance from the token environment variable.

    Raises ValueError when cls.TOKEN_ENV_VAR is unset.
    """
    token = getenv(cls.TOKEN_ENV_VAR)
    if token is None:
        raise ValueError(
            'missing environment variable: {!r}'.format(cls.TOKEN_ENV_VAR)
        )
    return cls(api_token=token)
Create a service instance from an environment variable .
74
9
14,754
def url_builder(self, endpoint, params=None, url_params=None):
    """Add the authentication token as a URL parameter, then delegate."""
    authed_params = OrderedDict() if url_params is None else url_params
    authed_params[self.AUTH_PARAM] = self.api_token
    return super().url_builder(
        endpoint,
        params=params,
        url_params=authed_params,
    )
Add authentication URL parameter .
78
5
14,755
def cmd(send, msg, args):
    """Send a random post from a subreddit (frontpage when none given)."""
    if msg and not check_exists(msg):
        send("Non-existant subreddit.")
        return
    subreddit = msg or None
    send(random_post(subreddit, args['config']['api']['bitlykey']))
Gets a random Reddit post .
68
7
14,756
def cmd(send, msg, args):
    """Choose between options separated by ' or '."""
    if not msg:
        send("Choose what?")
        return
    choices = msg.split(' or ')
    action = ['draws a slip of paper from a hat and gets...',
              'says eenie, menie, miney, moe and chooses...',
              'picks a random number and gets...',
              'rolls dice and gets...',
              'asks a random person and gets...',
              'plays rock, paper, scissors, lizard, spock and gets...']
    send("%s %s" % (choice(action), choice(choices)), 'action')
Chooses between multiple choices .
141
6
14,757
def cmd(send, msg, args):
    """Pester somebody by repeating a message at them three times."""
    if not msg or len(msg.split()) < 2:
        send("Pester needs at least two arguments.")
        return
    match = re.match('(%s+) (.*)' % args['config']['core']['nickregex'], msg)
    if not match:
        send("Invalid Syntax.")
        return
    message = match.group(2) + " "
    send('%s: %s' % (match.group(1), message * 3))
Pesters somebody .
120
4
14,758
def get_model(cls, version, model_name):
    """Return the BMDS model class for a BMDS version and model name.

    Searches every keystore for the version; raises ValueError when no
    keystore contains the name.
    """
    model_options = cls.versions[version].model_options
    for keystore in model_options.values():
        if model_name in keystore:
            return keystore[model_name]
    raise ValueError("Unknown model name")
Return BMDS model class given BMDS version and model - name .
64
14
14,759
def _add_to_to_ordered_dict(self, d, dataset_index, recommended_only=False):
    """Save this session (plus any dose-dropped sub-sessions) to *d*.

    A single session may include a final session as well as other BMDS
    executions where doses were dropped; all of them are appended, dropped
    ones first in sorted key order.
    """
    if self.doses_dropped_sessions:
        for key in sorted(self.doses_dropped_sessions):
            dropped_session = self.doses_dropped_sessions[key]
            dropped_session._add_single_session_to_to_ordered_dict(
                d, dataset_index, recommended_only)
    self._add_single_session_to_to_ordered_dict(d, dataset_index, recommended_only)
Save a session to an ordered dictionary . In some cases a single session may include a final session as well as other BMDS executions where doses were dropped . This will include all sessions .
129
37
14,760
def _add_single_session_to_to_ordered_dict(self, d, dataset_index, recommended_only):
    """Save a single session to an ordered dictionary.

    When recommended_only is set and no model is recommended (or
    recommendation is disabled), only the first model is written, flagged
    as a null row, so the dataset still appears in the output.
    """
    for model_index, model in enumerate(self.models):
        # determine if model should be presented, or if a null-model should
        # be presented (if no model is recommended.)
        show_null = False
        if recommended_only:
            if self.recommendation_enabled:
                if self.recommended_model is None:
                    if model_index == 0:
                        show_null = True
                    else:
                        continue
                elif self.recommended_model == model:
                    pass
                else:
                    continue
            else:
                if model_index == 0:
                    show_null = True
                else:
                    continue
        d["dataset_index"].append(dataset_index)
        d["doses_dropped"].append(self.doses_dropped)
        model._to_df(d, model_index, show_null)
Save a single session to an ordered dictionary .
195
9
14,761
def _group_models ( self ) : od = OrderedDict ( ) # Add models to appropriate list. We only aggregate models which # completed successfully and have a valid AIC and BMD. for i , model in enumerate ( self . models ) : output = getattr ( model , "output" , { } ) if output . get ( "AIC" ) and output . get ( "BMD" ) and output [ "BMD" ] > 0 : key = "{}-{}" . format ( output [ "AIC" ] , output [ "BMD" ] ) if key in od : od [ key ] . append ( model ) else : od [ key ] = [ model ] else : od [ i ] = [ model ] # Sort each list by the number of parameters def _get_num_params ( model ) : return ( len ( model . output [ "parameters" ] ) if hasattr ( model , "output" ) and "parameters" in model . output else 0 ) for key , _models in od . items ( ) : _models . sort ( key = _get_num_params ) return list ( od . values ( ) )
If AIC and BMD are numeric and identical then treat models as identical . Returns a list of lists . The outer list is a list of related models the inner list contains each individual model sorted by the number of parameters in ascending order .
251
48
14,762
def is_numeric(obj):
    """Detect whether *obj* supports arithmetic (and is therefore numeric).

    Division by zero counts as numeric (e.g. the integer 0); any other
    arithmetic failure counts as non-numeric.
    """
    try:
        obj + obj, obj - obj, obj * obj, obj ** obj, obj / obj
    except ZeroDivisionError:
        return True
    except Exception:
        return False
    return True
This detects whether an input object is numeric or not .
47
11
14,763
def handle ( send , msg , args ) : session = args [ 'db' ] matches = re . findall ( r"\b(?<!-)(%s{2,16})(\+\+|--)" % args [ 'config' ] [ 'core' ] [ 'nickregex' ] , msg ) if not matches : return if args [ 'type' ] == 'privmsg' : send ( 'Hey, no points in private messages!' ) return for match in matches : # limit to 5 score changes per minute if args [ 'abuse' ] ( args [ 'nick' ] , 5 , 'scores' ) : return name , direction = match [ 0 ] . lower ( ) , match [ 1 ] if direction == "++" : score = 1 if name == args [ 'nick' ] . lower ( ) : send ( "%s: No self promotion! You lose 10 points." % args [ 'nick' ] ) score = - 10 else : score = - 1 row = session . query ( Scores ) . filter ( Scores . nick == name ) . first ( ) if row is None : session . add ( Scores ( score = score , nick = name ) ) session . commit ( ) else : row . score += score session . commit ( )
Handles scores .
271
4
14,764
def handle ( send , msg , args ) : output = textutils . gen_xkcd_sub ( msg , True ) if output is None : return if args [ 'type' ] == 'action' : send ( "correction: * %s %s" % ( args [ 'nick' ] , output ) ) else : send ( "%s actually meant: %s" % ( args [ 'nick' ] , output ) )
Implements several XKCD comics .
93
9
14,765
def cmd ( send , msg , args ) : c , nick = args [ 'handler' ] . connection , args [ 'nick' ] channel = args [ 'target' ] if args [ 'target' ] != 'private' else args [ 'config' ] [ 'core' ] [ 'channel' ] if not msg : send ( "Nuke who?" ) return with args [ 'handler' ] . data_lock : users = args [ 'handler' ] . channels [ channel ] . users ( ) if msg in users : do_nuke ( c , nick , msg , channel ) elif msg == args [ 'botnick' ] : send ( "Sorry, Self-Nuking is disabled pending aquisition of a Lead-Lined Fridge." ) else : send ( "I'm sorry. Anonymous Nuking is not allowed" )
Nukes somebody .
180
4
14,766
def trim ( s , prefix = None , suffix = None , strict = False ) : ensure_string ( s ) has_prefix = prefix is not None has_suffix = suffix is not None if has_prefix == has_suffix : raise ValueError ( "exactly one of either prefix or suffix must be provided" ) if has_prefix : ensure_string ( prefix ) if s . startswith ( prefix ) : return s [ len ( prefix ) : ] elif strict : raise ValueError ( "string %r does not start with expected prefix %r" % ( s , prefix ) ) if has_suffix : ensure_string ( suffix ) if s . endswith ( suffix ) : return s [ : - len ( suffix ) ] if suffix else s elif strict : raise ValueError ( "string %r does not end with expected suffix %r" % ( s , suffix ) ) return s
Trim a string removing given prefix or suffix .
193
10
14,767
def join ( delimiter , iterable , * * kwargs ) : ensure_string ( delimiter ) ensure_iterable ( iterable ) ensure_keyword_args ( kwargs , optional = ( 'errors' , 'with_' ) ) errors = kwargs . get ( 'errors' , True ) if errors in ( 'raise' , True ) : iterable = imap ( ensure_string , iterable ) elif errors in ( 'ignore' , None ) : iterable = ifilter ( is_string , iterable ) elif errors in ( 'cast' , False ) : iterable = imap ( delimiter . __class__ , iterable ) elif errors == 'replace' : if 'with_' not in kwargs : raise ValueError ( "'replace' error policy requires specifying " "replacement through with_=" ) with_ = kwargs [ 'with_' ] if is_string ( with_ ) : replacement = lambda x : with_ elif callable ( with_ ) : replacement = with_ else : raise TypeError ( "error replacement must be a string or function, " "got %s" % type ( with_ ) . __name__ ) iterable = ( x if is_string ( x ) else ensure_string ( replacement ( x ) ) for x in iterable ) else : raise TypeError ( "%r is not a valid error handling policy for join()" % ( errors , ) ) return delimiter . join ( iterable )
Returns a string which is a concatenation of strings in iterable separated by given delimiter .
322
20
14,768
def camel_case ( arg , capitalize = None ) : ensure_string ( arg ) if not arg : return arg words = split ( arg ) first_word = words [ 0 ] if len ( words ) > 0 else None words = [ word . capitalize ( ) for word in words ] if first_word is not None : if capitalize is True : first_word = first_word . capitalize ( ) elif capitalize is False : first_word = first_word [ 0 ] . lower ( ) + first_word [ 1 : ] words [ 0 ] = first_word return join ( arg . __class__ ( ) , words )
Converts given text with whitespaces between words into equivalent camel - cased one .
133
17
14,769
def random ( length , chars = None ) : if chars is None : chars = string . ascii_letters + string . digits else : ensure_string ( chars ) if not chars : raise ValueError ( "character set must not be empty" ) if is_pair ( length ) : length = randint ( * length ) elif isinstance ( length , Integral ) : if not length > 0 : raise ValueError ( "random string length must be positive (got %r)" % ( length , ) ) else : raise TypeError ( "random string length must be an integer; " "got '%s'" % type ( length ) . __name__ ) return join ( chars . __class__ ( ) , ( choice ( chars ) for _ in xrange ( length ) ) )
Generates a random string .
166
6
14,770
def with_ ( self , replacement ) : ensure_string ( replacement ) if is_mapping ( self . _replacements ) : raise ReplacementError ( "string replacements already provided" ) self . _replacements = dict . fromkeys ( self . _replacements , replacement ) return self
Provide replacement for string needles .
60
7
14,771
def in_ ( self , haystack ) : from taipan . collections import dicts ensure_string ( haystack ) if not is_mapping ( self . _replacements ) : raise ReplacementError ( "string replacements not provided" ) # handle special cases if not self . _replacements : return haystack if len ( self . _replacements ) == 1 : return haystack . replace ( * dicts . peekitem ( self . _replacements ) ) # construct a regex matching any of the needles in the order # of descending length (to prevent issues if they contain each other) or_ = haystack . __class__ ( '|' ) regex = join ( or_ , imap ( re . escape , sorted ( self . _replacements , key = len , reverse = True ) ) ) # do the substituion, looking up the replacement for every match do_replace = lambda match : self . _replacements [ match . group ( ) ] return re . sub ( regex , do_replace , haystack )
Perform replacement in given string .
217
7
14,772
def wrap_list ( item ) : if item is None : return [ ] elif isinstance ( item , list ) : return item elif isinstance ( item , ( tuple , set ) ) : return list ( item ) else : return [ item ]
Returns an object as a list .
53
7
14,773
def update_additive ( dict1 , dict2 ) : for key , value in dict2 . items ( ) : if key not in dict1 : dict1 [ key ] = value else : # key in dict1 if isinstance ( dict1 [ key ] , collections . Mapping ) : assert isinstance ( value , collections . Mapping ) update_additive ( dict1 [ key ] , value ) else : # value is not a mapping type assert not isinstance ( value , collections . Mapping ) dict1 [ key ] = value
A utility method to update a dict or other mapping type with the contents of another dict .
114
18
14,774
def diff_dir ( dir_cmp , left_path = True ) : for name in dir_cmp . diff_files : if left_path : path_root = dir_cmp . left else : path_root = dir_cmp . right yield path . joinpath ( path_root , name ) for sub in dir_cmp . subdirs . values ( ) : # Need to iterate over the recursive call to make sure the individual values are yielded up the stack for the_dir in diff_dir ( sub , left_path ) : yield the_dir
A generator that given a filecmp . dircmp object yields the paths to all files that are different . Works recursively .
119
27
14,775
def get_params ( self , * keys ) : if len ( keys ) == 0 : return vars ( self ) else : return [ vars ( self ) [ k ] for k in keys ]
Returns the specified parameters for the current preprocessor .
42
10
14,776
def _import_single_searches ( self ) : searches = { # (?<!Setting ) is a special case for preventing # "Setting BMD = 100*(maximum dose)" matches "BMD" : r"(?<!Setting )BMD = +(%s)" % self . re_num , "BMDL" : r"BMDL = +(%s)" % self . re_num , "BMDU" : r"BMDU = +(%s)" % self . re_num , "CSF" : r"Cancer Slope Factor = +(%s)" % self . re_num , "AIC" : r"AIC: +(%s)" % ( self . re_num ) , "model_version" : r"Version: ([\d\.]+);" , "model_date" : r"Date: ([\d/]+)" , } for search in searches : m = re . search ( searches [ search ] , self . output_text ) if m : try : self . output [ search ] = float ( m . group ( 1 ) ) except : self . output [ search ] = m . group ( 1 ) else : self . output [ search ] = - 999
Look for simple one - line regex searches common across dataset types .
270
13
14,777
def _import_warnings ( self ) : warnings = ( r"Warning: BMDL computation is at best imprecise for these data" , r"THE MODEL HAS PROBABLY NOT CONVERGED!!!" , "THIS USUALLY MEANS THE MODEL HAS NOT CONVERGED!" , r"BMR value is not in the range of the mean function" , r"BMD = 100\*\(maximum dose\)" , r"BMDL computation failed\." , "Warning: optimum may not have been found. Bad completion code in Optimization routine." , # noqa "Warning: Likelihood for fitted model larger than the Likelihood for model A3." , # noqa ) self . output [ "warnings" ] = [ ] for warning in warnings : m = re . search ( warning , self . output_text ) if m : self . output [ "warnings" ] . append ( m . group ( ) )
Add custom warnings found in output files .
205
8
14,778
def _import_dich_vals ( self ) : m = re . search ( r"Chi\^2 = ({0}|\w+) +d.f. = +({0}|\w+) +P-value = +({0}|\w+)" . format ( self . re_num ) , # noqa self . output_text , ) cw = { 1 : "Chi2" , 2 : "df" , 3 : "p_value4" } for val in cw : try : self . output [ cw [ val ] ] = float ( m . group ( val ) ) except : self . output [ cw [ val ] ] = - 999
Import simple dichotomous values .
151
7
14,779
def transformation ( func ) : @ wraps ( func ) def func_as_transformation ( * args , * * kwargs ) : # When using transforms that return new ndarrays we lose the # jicimagelib.image.Image type and the history of the image. # One therefore needs to: # - Extract the history from the input jicimagelib.image.Image. # - Apply the transformation, which may return a numpy ndarray. # - Force the image to the jicimagelib.image.Image type. # - Re-attach the extracted history if hasattr ( args [ 0 ] , 'history' ) : # Working on jicimagelib.Image. history = args [ 0 ] . history else : # Working on something without a history, e.g. a ndarray stack. history = [ ] image = func ( * args , * * kwargs ) image = Image . from_array ( image , log_in_history = False ) image . history = history image . history . append ( 'Applied {} transform' . format ( func . __name__ ) ) if AutoWrite . on : fpath = AutoName . name ( func ) try : if AutoWrite . auto_safe_dtype : safe_range_im = 255 * normalise ( image ) pil_im = PIL . Image . fromarray ( safe_range_im . astype ( np . uint8 ) ) else : pil_im = PIL . Image . fromarray ( image ) except TypeError : # Give a more meaningful error message. raise ( TypeError ( "Cannot handle this data type: {}" . format ( image . dtype ) ) ) pil_im . save ( fpath ) return image return func_as_transformation
Function decorator to turn another function into a transformation .
382
11
14,780
def smooth_gaussian ( image , sigma = 1 ) : return scipy . ndimage . filters . gaussian_filter ( image , sigma = sigma , mode = "nearest" )
Returns Gaussian smoothed image .
45
7
14,781
def equalize_adaptive_clahe ( image , ntiles = 8 , clip_limit = 0.01 ) : # Convert input for skimage. skimage_float_im = normalise ( image ) if np . all ( skimage_float_im ) : raise ( RuntimeError ( "Cannot equalise when there is no variation." ) ) normalised = skimage . exposure . equalize_adapthist ( skimage_float_im , ntiles_x = ntiles , ntiles_y = ntiles , clip_limit = clip_limit ) assert np . max ( normalised ) == 1.0 assert np . min ( normalised ) == 0.0 return normalised
Return contrast limited adaptive histogram equalized image . The return value is normalised to the range 0 to 1 .
156
23
14,782
def threshold_otsu ( image , multiplier = 1.0 ) : otsu_value = skimage . filters . threshold_otsu ( image ) return image > otsu_value * multiplier
Return image thresholded using Otsu s method .
43
11
14,783
def cmd ( send , msg , args ) : parser = arguments . ArgParser ( args [ 'config' ] ) parser . add_argument ( 'action' , choices = [ 'check' , 'master' , 'commit' ] , nargs = '?' ) try : cmdargs = parser . parse_args ( msg ) except arguments . ArgumentException as e : send ( str ( e ) ) return api_output = get ( 'https://api.github.com/repos/%s/branches/master' % args [ 'config' ] [ 'api' ] [ 'githubrepo' ] ) . json ( ) commit , version = misc . get_version ( args [ 'handler' ] . confdir ) if not cmdargs . action : send ( version ) return if cmdargs . action == 'master' : send ( api_output [ 'commit' ] [ 'sha' ] ) elif cmdargs . action == 'check' : if commit is None : send ( "Not running from git, version %s" % version ) else : check = 'Same' if api_output [ 'commit' ] [ 'sha' ] == commit else 'Different' send ( check ) elif cmdargs . action == 'commit' : if commit is None : send ( "Not running from git, version %s" % version ) else : send ( commit )
Check the git revison .
292
6
14,784
def cmd ( send , _ , args ) : send ( "%s! %s" % ( args [ 'name' ] . upper ( ) , random . choice ( squirrels ) ) )
Ships a product .
40
5
14,785
def getAsGrassAsciiRaster ( self , tableName , rasterId = 1 , rasterIdFieldName = 'id' , rasterFieldName = 'raster' , newSRID = None ) : # Get raster in ArcInfo Grid format arcInfoGrid = self . getAsGdalRaster ( rasterFieldName , tableName , rasterIdFieldName , rasterId , 'AAIGrid' , newSRID ) . splitlines ( ) ## Convert arcInfoGrid to GRASS ASCII format ## # Get values from header which look something this: # ncols 67 # nrows 55 # xllcorner 425802.32143212341 # yllcorner 44091450.41551345213 # cellsize 90.0000000 # ... nCols = int ( arcInfoGrid [ 0 ] . split ( ) [ 1 ] ) nRows = int ( arcInfoGrid [ 1 ] . split ( ) [ 1 ] ) xLLCorner = float ( arcInfoGrid [ 2 ] . split ( ) [ 1 ] ) yLLCorner = float ( arcInfoGrid [ 3 ] . split ( ) [ 1 ] ) cellSize = float ( arcInfoGrid [ 4 ] . split ( ) [ 1 ] ) # Remove old headers for i in range ( 0 , 5 ) : arcInfoGrid . pop ( 0 ) # Check for NODATA_value row and remove if it is there if 'NODATA_value' in arcInfoGrid [ 0 ] : arcInfoGrid . pop ( 0 ) ## Calculate values for GRASS ASCII headers ## # These should look like this: # north: 4501028.972140 # south: 4494548.972140 # east: 460348.288604 # west: 454318.288604 # rows: 72 # cols: 67 # ... # xLLCorner and yLLCorner represent the coordinates for the Lower Left corner of the raster north = yLLCorner + ( cellSize * nRows ) south = yLLCorner east = xLLCorner + ( cellSize * nCols ) west = xLLCorner # Create header Lines (the first shall be last and the last shall be first) grassHeader = [ 'cols: %s' % nCols , 'rows: %s' % nRows , 'west: %s' % west , 'east: %s' % east , 'south: %s' % south , 'north: %s' % north ] # Insert grass headers into the grid for header in grassHeader : arcInfoGrid . insert ( 0 , header ) # Create string arcInfoGridString = '\n' . join ( arcInfoGrid ) return arcInfoGridString
Returns a string representation of the raster in GRASS ASCII raster format .
589
16
14,786
def supportedGdalRasterFormats ( cls , sqlAlchemyEngineOrSession ) : if isinstance ( sqlAlchemyEngineOrSession , Engine ) : # Create sqlalchemy session sessionMaker = sessionmaker ( bind = sqlAlchemyEngineOrSession ) session = sessionMaker ( ) elif isinstance ( sqlAlchemyEngineOrSession , Session ) : session = sqlAlchemyEngineOrSession # Execute statement statement = 'SELECT * FROM st_gdaldrivers() ORDER BY short_name;' result = session . execute ( statement ) supported = dict ( ) for row in result : supported [ row [ 1 ] ] = { 'description' : row [ 2 ] , 'options' : row [ 3 ] } return supported
Return a list of the supported GDAL raster formats .
155
12
14,787
def setColorRamp ( self , colorRamp = None ) : if not colorRamp : self . _colorRamp = RasterConverter . setDefaultColorRamp ( ColorRampEnum . COLOR_RAMP_HUE ) else : self . _colorRamp = colorRamp
Set the color ramp of the raster converter instance
67
10
14,788
def setDefaultColorRamp ( self , colorRampEnum = ColorRampEnum . COLOR_RAMP_HUE ) : self . _colorRamp = ColorRampGenerator . generateDefaultColorRamp ( colorRampEnum )
Returns the color ramp as a list of RGB tuples
56
11
14,789
def isNumber ( self , value ) : try : str ( value ) float ( value ) return True except ValueError : return False
Validate whether a value is a number or not
27
10
14,790
def check_exists ( subreddit ) : req = get ( 'http://www.reddit.com/r/%s/about.json' % subreddit , headers = { 'User-Agent' : 'CslBot/1.0' } ) if req . json ( ) . get ( 'kind' ) == 'Listing' : # no subreddit exists, search results page is shown return False return req . status_code == 200
Make sure that a subreddit actually exists .
93
8
14,791
def random_post ( subreddit , apikey ) : subreddit = '/r/random' if subreddit is None else '/r/%s' % subreddit urlstr = 'http://reddit.com%s/random?%s' % ( subreddit , time . time ( ) ) url = get ( urlstr , headers = { 'User-Agent' : 'CslBot/1.0' } ) . url return '** %s - %s' % ( get_title ( url , apikey ) , get_short ( url , apikey ) )
Gets a random post from a subreddit and returns a title and shortlink to it .
121
18
14,792
def parse_to_dict ( self ) : lst = [ ] with open ( self . fname , 'r' ) as f : hdr = f . readline ( ) self . hdrs = hdr . split ( ',' ) #print("self.hdrs = ", self.hdrs) for line in f : cols = line . split ( ',' ) if len ( cols ) == len ( self . hdrs ) : #print(cols) d = { } for ndx , col_header in enumerate ( self . hdrs ) : #d[self.hdrs[ndx].strip('\n').strip()] = cols[ndx].strip('\n').strip() d [ col_header . strip ( '\n' ) . strip ( ) ] = cols [ ndx ] . strip ( '\n' ) . strip ( ) lst . append ( d ) else : print ( "Error parsing " + self . fname + " line : " + line ) return lst
parse raw CSV into dictionary
227
5
14,793
def get_random_choice ( self ) : i = random . randint ( 0 , len ( self . dat ) - 1 ) return self . dat [ i ] [ 'name' ]
returns a random name from the class
40
8
14,794
def random_stats ( self , all_stats , race , ch_class ) : # create blank list of stats to be generated stats = [ ] res = { } for s in all_stats : stats . append ( s [ 'stat' ] ) res [ s [ 'stat' ] ] = 0 cur_stat = 0 for stat in stats : for ndx , i in enumerate ( self . classes . dat ) : if i [ 'name' ] == ch_class : cur_stat = int ( i [ stat ] ) # use stats for this class as baseline for ndx , i in enumerate ( self . races . dat ) : if i [ 'name' ] == race : cur_stat += int ( i [ stat ] ) # use stats for this race to modify base stats #print(stat, cur_stat) if cur_stat < 1 : cur_stat = 1 elif cur_stat > 10 : if stat not in ( 'Health' , 'max_health' ) : # dont trim down health cur_stat = 10 res [ stat ] = cur_stat return res
create random stats based on the characters class and race This looks up the tables from CharacterCollection to get base stats and applies a close random fit
233
28
14,795
def load_from_file ( self , fname ) : with open ( fname , 'r' ) as f : for line in f : k , v = line . split ( ' = ' ) self . _parse_char_line_to_self ( k , v )
OVERWRITES the current character object from stats in file
60
12
14,796
def _parse_char_line_to_self ( self , k , v ) : k = k . strip ( ' ' ) . strip ( '\n' ) v = v . strip ( ' ' ) . strip ( '\n' ) # print('_parse_char_line_to_self(self, k,v): ' , k, v) if k == 'CHARACTER' : self . name = v elif k == 'Race' : self . race = v elif k == 'Class' : self . ch_class = v elif k == 'STATS' : self . stats = self . _extract_stats_from_line ( v ) elif k == 'Story' : self . story = v . strip ( ' ' ) . strip ( '\n' ) elif k == 'SKILLS' : self . skills = v . split ( ', ' ) elif k == 'INVENTORY' : self . inventory = v . split ( ', ' )
takes a line from a saved file split into key and values and updates the appropriate self parameters of character .
218
22
14,797
def save_to_file ( self , fname ) : with open ( fname , 'w' ) as f : f . write ( str ( self ) )
saves a characters data to file
35
7
14,798
def copy ( self ) : return Character ( self . name , self . race , self . ch_class , self . stats , self . skills , self . story , self . inventory )
make an identical copy of the character
39
7
14,799
def palette ( fg , bg = - 1 ) : if not hasattr ( palette , "counter" ) : palette . counter = 1 if not hasattr ( palette , "selections" ) : palette . selections = { } selection = "%s%s" % ( str ( fg ) , str ( bg ) ) if not selection in palette . selections : palette . selections [ selection ] = palette . counter palette . counter += 1 # Get available colours colors = [ c for c in dir ( _curses ) if c . startswith ( 'COLOR' ) ] if isinstance ( fg , str ) : if not "COLOR_" + fg . upper ( ) in colors : fg = - 1 else : fg = getattr ( _curses , "COLOR_" + fg . upper ( ) ) if isinstance ( bg , str ) : if not "COLOR_" + bg . upper ( ) in colors : bg = - 1 else : bg = getattr ( _curses , "COLOR_" + bg . upper ( ) ) _curses . init_pair ( palette . selections [ selection ] , fg , bg ) return _curses . color_pair ( palette . selections [ selection ] )
Since curses only supports a finite amount of initialised colour pairs we memoise any selections you ve made as an attribute on this function
269
26