idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
249,600
def to_unit(value, unit='B'):
    """Convert a byte count to the given unit.

    Returns a ``(scaled_value, unit)`` tuple rounded to ``PRECISION`` when
    *unit* is one of B/KB/MB/GB/TB; any other unit returns the (float)
    value unchanged.
    """
    known_units = ['B', 'KB', 'MB', 'GB', 'TB']
    if not isinstance(value, (int, float)):
        value = float(value)
    if unit not in known_units:
        return value
    scaled = value / 1024 ** known_units.index(unit)
    return round(scaled, PRECISION), unit
Convert bytes to the given unit .
93
7
249,601
def realtime_stream(self, channel=1, typeno=0, path_file=None):
    """Fetch the realtime monitor stream for *channel*/*typeno*.

    When *path_file* is given, the raw stream is copied into that file
    (use e.g. mplayer to view the recording). The raw stream object is
    returned either way.
    """
    cgi = 'realmonitor.cgi?action=getStream&channel={0}&subtype={1}'
    ret = self.command(cgi.format(channel, typeno))
    if path_file:
        with open(path_file, 'wb') as out_file:
            shutil.copyfileobj(ret.raw, out_file)
    return ret.raw
If the stream is redirected to a file use the mplayer tool to visualize the video recording
101
17
249,602
def rtsp_url(self, channelno=None, typeno=None):
    """Return the RTSP streaming url for the given channel/subtype."""
    if channelno is None:
        channelno = 1
    if typeno is None:
        typeno = 0
    cmd = 'cam/realmonitor?channel={0}&subtype={1}'.format(channelno, typeno)
    # Pull the configured RTSP port out of the config dump; fall back to
    # the protocol default (empty port string) when it is absent.
    try:
        ports = [entry.split('=')[1]
                 for entry in self.rtsp_config.split()
                 if entry.startswith('table.RTSP.Port=')]
        port = ':' + ports[0]
    except IndexError:
        port = ''
    return 'rtsp://{}:{}@{}{}/{}'.format(
        self._user, self._password, self._host, port, cmd)
Return RTSP streaming url
167
5
249,603
def mjpeg_url(self, channelno=None, typeno=None):
    """Return the MJPEG streaming url.

    NOTE(review): defaults here are channel 0 / subtype 1, the opposite
    of ``rtsp_url`` — confirm this asymmetry is intended by the device
    API before changing it.
    """
    if channelno is None:
        channelno = 0
    if typeno is None:
        typeno = 1
    cmd = "mjpg/video.cgi?channel={0}&subtype={1}".format(channelno, typeno)
    return '{0}{1}'.format(self._base_url, cmd)
Return MJPEG streaming url
89
6
249,604
def scan_devices(self, subnet, timeout=None):
    """Scan cameras in a range of ips.

    :param subnet: network as "a.b.c.d" or "a.b.c.d/mask"; /24 is assumed
        when no mask is given. Only the masks listed below are accepted.
    :param timeout: per-host timeout forwarded to the raw scanner.
    :return: ``self.amcrest_ips`` collected by the scanning threads.
    :raises RuntimeError: when the mask is not supported.
    """
    # Maximum range from mask
    # Format is mask: max_range
    max_range = {16: 256, 24: 256, 25: 128, 27: 32, 28: 16, 29: 8, 30: 4, 31: 2}

    # If user didn't provide mask, use /24
    if "/" not in subnet:
        mask = int(24)
        network = subnet
    else:
        network, mask = subnet.split("/")
        mask = int(mask)

    if mask not in max_range:
        raise RuntimeError("Cannot determine the subnet mask!")

    # Default logic is remove everything from last "." to the end
    # This logic change in case mask is 16
    network = network.rpartition(".")[0]

    if mask == 16:
        # For mask 16, we must cut the last two
        # entries with .
        # (the first octet was already cut above, so one more here)
        # pylint: disable=unused-variable
        for i in range(0, 1):
            network = network.rpartition(".")[0]

    # Trigger the scan
    # For clear coding, let's keep the logic in if/else (mask16)
    # instead of only one if
    if mask == 16:
        # NOTE(review): one thread per address means up to 65536 threads
        # for a /16 — confirm this load is acceptable.
        for seq1 in range(0, max_range[mask]):
            for seq2 in range(0, max_range[mask]):
                ipaddr = "{0}.{1}.{2}".format(network, seq1, seq2)
                thd = threading.Thread(
                    target=self.__raw_scan, args=(ipaddr, timeout))
                thd.start()
    else:
        # NOTE(review): for masks 25-31 scanning always starts at host 0
        # of the truncated network, not at the subnet's actual offset —
        # verify this is the intended behavior.
        for seq1 in range(0, max_range[mask]):
            ipaddr = "{0}.{1}".format(network, seq1)
            thd = threading.Thread(
                target=self.__raw_scan, args=(ipaddr, timeout))
            thd.start()
    return self.amcrest_ips
Scan cameras in a range of ips
422
8
249,605
def disallow(self, foreign, permission="active", account=None, threshold=None, **kwargs):
    """Remove access previously granted to another public key or account.

    :param foreign: public key or account name whose authority is removed
    :param permission: authority to modify, "owner" or "active"
    :param account: account to operate on (defaults to the configured
        default_account)
    :param threshold: optionally set a new weight threshold
    :raises ValueError: for an unknown foreign account/key, a missing
        account, a bad permission name, or when nothing would change
    """
    if not account:
        if "default_account" in self.config:
            account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    if permission not in ["owner", "active"]:
        # BUG FIX: closing quote was missing around 'active'.
        raise ValueError("Permission needs to be either 'owner', or 'active'")
    account = Account(account, blockchain_instance=self)
    authority = account[permission]

    # First try to interpret `foreign` as a public key, then fall back to
    # an account name. BUG FIX: the bare excepts swallowed *everything*
    # (including KeyboardInterrupt); narrowed to Exception.
    try:
        pubkey = PublicKey(foreign, prefix=self.prefix)
        affected_items = list(
            filter(lambda x: x[0] == str(pubkey), authority["key_auths"]))
        authority["key_auths"] = list(
            filter(lambda x: x[0] != str(pubkey), authority["key_auths"]))
    except Exception:
        try:
            foreign_account = Account(foreign, blockchain_instance=self)
            affected_items = list(
                filter(lambda x: x[0] == foreign_account["id"],
                       authority["account_auths"]))
            authority["account_auths"] = list(
                filter(lambda x: x[0] != foreign_account["id"],
                       authority["account_auths"]))
        except Exception:
            raise ValueError("Unknown foreign account or invalid public key")

    if not affected_items:
        raise ValueError("Changes nothing!")
    removed_weight = affected_items[0][1]

    # Define threshold
    if threshold:
        authority["weight_threshold"] = threshold

    # Correct threshold (at most by the amount removed from the authority)
    try:
        self._test_weights_treshold(authority)
    except Exception:
        log.critical("The account's threshold will be reduced by %d"
                     % (removed_weight))
        authority["weight_threshold"] -= removed_weight
        self._test_weights_treshold(authority)

    op = operations.Account_update(**{
        "fee": {"amount": 0, "asset_id": "1.3.0"},
        "account": account["id"],
        permission: authority,
        "extensions": {},
    })
    if permission == "owner":
        return self.finalizeOp(op, account["name"], "owner", **kwargs)
    return self.finalizeOp(op, account["name"], "active", **kwargs)
Remove additional access to an account by some other public key or account .
576
14
249,606
def approvewitness(self, witnesses, account=None, **kwargs):
    """Approve one or more witnesses with the voting account."""
    if not account and "default_account" in self.config:
        account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account, blockchain_instance=self)
    options = account["options"]

    if not isinstance(witnesses, (list, set, tuple)):
        witnesses = {witnesses}
    for witness in witnesses:
        witness = Witness(witness, blockchain_instance=self)
        options["votes"].append(witness["vote_id"])

    # De-duplicate and recount witness votes (vote type 1).
    options["votes"] = list(set(options["votes"]))
    options["num_witness"] = len(
        [vote for vote in options["votes"] if float(vote.split(":")[0]) == 1])

    op = operations.Account_update(**{
        "fee": {"amount": 0, "asset_id": "1.3.0"},
        "account": account["id"],
        "new_options": options,
        "extensions": {},
        "prefix": self.prefix,
    })
    return self.finalizeOp(op, account["name"], "active", **kwargs)
Approve a witness
304
5
249,607
def approvecommittee(self, committees, account=None, **kwargs):
    """Approve one or more committee members with the voting account."""
    if not account and "default_account" in self.config:
        account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account, blockchain_instance=self)
    options = account["options"]

    if not isinstance(committees, (list, set, tuple)):
        committees = {committees}
    for committee in committees:
        committee = Committee(committee, blockchain_instance=self)
        options["votes"].append(committee["vote_id"])

    # De-duplicate and recount committee votes (vote type 0).
    options["votes"] = list(set(options["votes"]))
    options["num_committee"] = len(
        [vote for vote in options["votes"] if float(vote.split(":")[0]) == 0])

    op = operations.Account_update(**{
        "fee": {"amount": 0, "asset_id": "1.3.0"},
        "account": account["id"],
        "new_options": options,
        "extensions": {},
        "prefix": self.prefix,
    })
    return self.finalizeOp(op, account["name"], "active", **kwargs)
Approve a committee
302
5
249,608
def betting_market_rules_create(self, names, descriptions, account=None, **kwargs):
    """Create betting market rules.

    :param names: list of localized names
    :param descriptions: list of localized descriptions
    """
    assert isinstance(names, list)
    assert isinstance(descriptions, list)
    if not account and "default_account" in self.config:
        account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account)
    op = operations.Betting_market_rules_create(**{
        "fee": {"amount": 0, "asset_id": "1.3.0"},
        "name": names,
        "description": descriptions,
        "prefix": self.prefix,
    })
    return self.finalizeOp(op, account["name"], "active", **kwargs)
Create betting market rules
183
4
249,609
def betting_market_rules_update(self, rules_id, names, descriptions, account=None, **kwargs):
    """Update existing betting market rules.

    :param rules_id: id of the rules object to update
    :param names: list of new localized names
    :param descriptions: list of new localized descriptions
    """
    assert isinstance(names, list)
    assert isinstance(descriptions, list)
    if not account and "default_account" in self.config:
        account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account)
    rule = Rule(rules_id)
    op = operations.Betting_market_rules_update(**{
        "fee": {"amount": 0, "asset_id": "1.3.0"},
        "betting_market_rules_id": rule["id"],
        "new_name": names,
        "new_description": descriptions,
        "prefix": self.prefix,
    })
    return self.finalizeOp(op, account["name"], "active", **kwargs)
Update betting market rules
217
4
249,610
def bet_place(self, betting_market_id, amount_to_bet, backer_multiplier, back_or_lay, account=None, **kwargs):
    """Place a bet on a betting market.

    :param amount_to_bet: an ``Amount`` instance
    :param backer_multiplier: odds, scaled internally by
        GRAPHENE_BETTING_ODDS_PRECISION
    :param back_or_lay: either "back" or "lay"
    """
    from . import GRAPHENE_BETTING_ODDS_PRECISION
    assert isinstance(amount_to_bet, Amount)
    assert back_or_lay in ["back", "lay"]
    if not account and "default_account" in self.config:
        account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account)
    market = BettingMarket(betting_market_id)
    multiplier = int(backer_multiplier * GRAPHENE_BETTING_ODDS_PRECISION)
    op = operations.Bet_place(**{
        "fee": {"amount": 0, "asset_id": "1.3.0"},
        "bettor_id": account["id"],
        "betting_market_id": market["id"],
        "amount_to_bet": amount_to_bet.json(),
        "backer_multiplier": multiplier,
        "back_or_lay": back_or_lay,
        "prefix": self.prefix,
    })
    return self.finalizeOp(op, account["name"], "active", **kwargs)
Place a bet
318
3
249,611
def bet_cancel(self, bet_to_cancel, account=None, **kwargs):
    """Cancel a previously placed bet.

    :param bet_to_cancel: id of the bet to cancel
    """
    if not account and "default_account" in self.config:
        account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account)
    bet = Bet(bet_to_cancel)
    op = operations.Bet_cancel(**{
        "fee": {"amount": 0, "asset_id": "1.3.0"},
        "bettor_id": account["id"],
        "bet_to_cancel": bet["id"],
        "prefix": self.prefix,
    })
    return self.finalizeOp(op, account["name"], "active", **kwargs)
Cancel a bet
192
4
249,612
def verbose(f):
    """Decorator: configure logging verbosity from ``ctx.obj["verbose"]``
    and attach stream handlers before invoking the wrapped command.

    Levels 0-4 configure the CLI logger; >4 also enables "grapheneapi"
    logging and >8 "graphenebase" logging (each offset back into the
    0-4 range).
    """
    @click.pass_context
    def new_func(ctx, *args, **kwargs):
        # NOTE: rebinds the module-level `log` below when extra loggers
        # are enabled — subsequent addHandler calls target the new logger.
        global log
        verbosity = ["critical", "error", "warn", "info", "debug"][
            int(min(ctx.obj.get("verbose", 0), 4))]
        log.setLevel(getattr(logging, verbosity.upper()))
        formatter = logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
        ch = logging.StreamHandler()
        ch.setLevel(getattr(logging, verbosity.upper()))
        ch.setFormatter(formatter)
        log.addHandler(ch)
        # GrapheneAPI logging
        if ctx.obj.get("verbose", 0) > 4:
            verbosity = ["critical", "error", "warn", "info", "debug"][
                int(min(ctx.obj.get("verbose", 4) - 4, 4))]
            log = logging.getLogger("grapheneapi")
            log.setLevel(getattr(logging, verbosity.upper()))
            log.addHandler(ch)
        if ctx.obj.get("verbose", 0) > 8:
            verbosity = ["critical", "error", "warn", "info", "debug"][
                int(min(ctx.obj.get("verbose", 8) - 8, 4))]
            log = logging.getLogger("graphenebase")
            log.setLevel(getattr(logging, verbosity.upper()))
            log.addHandler(ch)
        return ctx.invoke(f, *args, **kwargs)
    return update_wrapper(new_func, f)
Add verbose flags and add logging handlers
427
8
249,613
def offline(f):
    """Decorator providing ``ctx.peerplays``: a PeerPlays instance created
    with ``offline=True`` (also exposed as ``ctx.blockchain`` and set as
    the shared instance)."""
    @click.pass_context
    @verbose
    def wrapper(ctx, *args, **kwargs):
        ctx.obj["offline"] = True
        instance = PeerPlays(**ctx.obj)
        ctx.peerplays = instance
        ctx.blockchain = instance
        set_shared_peerplays_instance(instance)
        return ctx.invoke(f, *args, **kwargs)
    return update_wrapper(wrapper, f)
This decorator allows you to access ctx . peerplays which is an instance of PeerPlays with offline = True .
115
25
249,614
def configfile(f):
    """Decorator: parse the YAML file named by ``ctx.obj["configfile"]``
    and store the resulting dictionary on ``ctx.config``."""
    @click.pass_context
    def new_func(ctx, *args, **kwargs):
        # BUG FIX: the file handle was never closed, and yaml.load()
        # without an explicit Loader is unsafe (arbitrary object
        # construction) and deprecated — use safe_load in a `with` block.
        with open(ctx.obj["configfile"]) as config_fh:
            ctx.config = yaml.safe_load(config_fh)
        return ctx.invoke(f, *args, **kwargs)
    return update_wrapper(new_func, f)
This decorator will parse a configuration file in YAML format and store the dictionary in ctx . blockchain . config
83
24
249,615
def on_message(self, ws, reply, *args):
    """Handle every message received on the websocket connection.

    "notice" messages are handed to ``process_notice``; other event ids
    are patched through to the matching callback on ``self.events``.

    :raises ValueError: when the node replies with non-JSON data
    """
    log.debug("Received message: %s" % str(reply))
    data = {}
    try:
        data = json.loads(reply, strict=False)
    except ValueError:
        raise ValueError("API node returned invalid format. Expected JSON!")

    if data.get("method") == "notice":
        # Renamed from `id` to avoid shadowing the builtin.
        event_id = data["params"][0]
        if event_id >= len(self.__events__):
            log.critical("Received an id that is out of range\n\n" + str(data))
            return

        # This is a "general" object change notification
        if event_id == self.__events__.index("on_object"):
            # Let's see if a specific object has changed
            for notice in data["params"][1]:
                try:
                    if "id" in notice:
                        self.process_notice(notice)
                    else:
                        for obj in notice:
                            if "id" in obj:
                                self.process_notice(obj)
                except Exception as e:
                    # BUG FIX: traceback.format_exc was logged without
                    # being called, printing the function object instead
                    # of the traceback text.
                    log.critical("Error in process_notice: {}\n\n{}".format(
                        str(e), traceback.format_exc()))
        else:
            try:
                callbackname = self.__events__[event_id]
                log.info("Patching through to call %s" % callbackname)
                [getattr(self.events, callbackname)(x)
                 for x in data["params"][1]]
            except Exception as e:
                log.critical("Error in {}: {}\n\n{}".format(
                    callbackname, str(e), traceback.format_exc()))
This method is called by the websocket connection on every message that is received . If we receive a notice we hand over post - processing and signalling of events to process_notice .
365
36
249,616
def on_close(self, ws):
    """Stop the keepalive thread when the websocket connection closes."""
    log.debug("Closing WebSocket connection with {}".format(self.url))
    keepalive = self.keepalive
    if keepalive and keepalive.is_alive():
        # Signal the thread's loop to stop, then wait for it to finish.
        keepalive.do_run = False
        keepalive.join()
Called when websocket connection is closed
72
8
249,617
def run_forever(self):
    """Run the websocket app continuously, rotating through the provided
    API urls and dispatching the configured callbacks.

    :raises NumRetriesReached: once ``num_retries`` connection attempts
        have failed (when ``num_retries`` >= 0).
    """
    cnt = 0
    while True:
        cnt += 1
        # Round-robin through the configured nodes.
        self.url = next(self.urls)
        log.debug("Trying to connect to node %s" % self.url)
        try:
            # websocket.enableTrace(True)
            self.ws = websocket.WebSocketApp(
                self.url,
                on_message=self.on_message,
                # on_data=self.on_message,
                on_error=self.on_error,
                on_close=self.on_close,
                on_open=self.on_open,
            )
            self.ws.run_forever()
        except websocket.WebSocketException as exc:
            if self.num_retries >= 0 and cnt > self.num_retries:
                raise NumRetriesReached()
            # Linear backoff, capped at 10 seconds.
            sleeptime = (cnt - 1) * 2 if cnt < 10 else 10
            if sleeptime:
                log.warning(
                    "Lost connection to node during wsconnect(): %s (%d/%d) "
                    % (self.url, cnt, self.num_retries)
                    + "Retrying in %d seconds" % sleeptime)
                time.sleep(sleeptime)
        except KeyboardInterrupt:
            # Allow a clean shutdown from the keyboard.
            self.ws.keep_running = False
            raise
        except Exception as e:
            # NOTE(review): other errors are logged and the loop retries
            # immediately with no sleep and no retry cap — confirm intent.
            log.critical("{}\n\n{}".format(str(e), traceback.format_exc()))
This method is used to run the websocket app continuously . It will execute callbacks as defined and try to stay connected with the provided APIs
314
28
249,618
def register_dataframe_method(method):
    """Register a function as a method attached to the Pandas DataFrame.

    The wrapped function receives the DataFrame as its first argument.
    CONSISTENCY FIX: propagate ``method.__doc__`` onto the accessor class,
    matching ``register_series_method``, so help() on the accessor shows
    the original docstring.
    """
    def inner(*args, **kwargs):
        class AccessorMethod(object):
            # Keep the wrapped function's docstring visible on the accessor.
            __doc__ = method.__doc__

            def __init__(self, pandas_obj):
                self._obj = pandas_obj

            @wraps(method)
            def __call__(self, *args, **kwargs):
                return method(self._obj, *args, **kwargs)

        register_dataframe_accessor(method.__name__)(AccessorMethod)
        return method

    return inner()
Register a function as a method attached to the Pandas DataFrame .
118
14
249,619
def register_series_method(method):
    """Register a function as a method attached to the Pandas Series.

    The wrapped function receives the Series as its first argument and
    keeps its docstring and metadata.
    """
    def _register(*args, **kwargs):
        class _Accessor(object):
            __doc__ = method.__doc__

            def __init__(self, pandas_obj):
                self._obj = pandas_obj

            @wraps(method)
            def __call__(self, *args, **kwargs):
                return method(self._obj, *args, **kwargs)

        register_series_accessor(method.__name__)(_Accessor)
        return method

    return _register()
Register a function as a method attached to the Pandas Series .
125
13
249,620
def add_invites_to_user(cls, user, amount):
    """Add *amount* invites to the user's allocated total.

    Users whose allocation is -1 (unlimited) are left untouched.
    """
    stat, _ = InvitationStat.objects.get_or_create(user=user)
    if stat.invites_allocated == -1:
        return
    stat.invites_allocated += amount
    stat.save()
Add the specified number of invites to current allocated total .
62
11
249,621
def add_invites(cls, amount):
    """Add *amount* invites to every existing user."""
    all_users = get_user_model().objects.all()
    for user in all_users:
        cls.add_invites_to_user(user, amount)
Add invites for all users .
45
6
249,622
def topoff_user(cls, user, amount):
    """Ensure *user* has at least *amount* invites remaining.

    Users with unlimited invites (remaining == -1) are skipped.
    """
    stat, _ = cls.objects.get_or_create(user=user)
    remaining = stat.invites_remaining()
    if remaining == -1 or remaining >= amount:
        return
    stat.invites_allocated += amount - remaining
    stat.save()
Ensure user has a minimum number of invites .
70
10
249,623
def topoff(cls, amount):
    """Ensure every user has at least *amount* invites remaining."""
    all_users = get_user_model().objects.all()
    for user in all_users:
        cls.topoff_user(user, amount)
Ensure all users have a minimum number of invites .
39
11
249,624
def align(self, alignment=None):
    """Advance the reader to the next aligned offset.

    When *alignment* is None it defaults to the pointer width of the
    dump's architecture (8 on AMD64, 4 otherwise).
    """
    if alignment is None:
        is_amd64 = (self.reader.sysinfo.ProcessorArchitecture
                    == PROCESSOR_ARCHITECTURE.AMD64)
        alignment = 8 if is_amd64 else 4
    remainder = self.current_position % alignment
    if remainder:
        # Seek forward (whence=1) by the gap to the next boundary.
        self.seek((alignment - remainder) % alignment, 1)
Repositions the current reader to match architecture alignment
89
10
249,625
def peek(self, length):
    """Return *length* bytes at the current position without advancing.

    :raises Exception: when the read would cross the segment boundary.
    """
    end = self.current_position + length
    if not self.current_segment.inrange(end):
        raise Exception('Would read over segment boundaries!')
    base = self.current_segment.start_address
    return self.current_segment.data[self.current_position - base:end - base]
Returns up to length bytes from the current memory segment
83
10
249,626
def read(self, size=-1):
    """Read *size* bytes from the current segment and advance the cursor.

    ``size == -1`` returns everything left in the segment, or None when
    the segment is exhausted. Reads crossing the segment end raise.
    """
    if size < -1:
        raise Exception('You shouldnt be doing this')
    base = self.current_segment.start_address
    if size == -1:
        if not self.current_segment.remaining_len(self.current_position):
            return None
        start = self.current_position
        self.current_position = self.current_segment.end_address
        return self.current_segment.data[start - base:]
    end = self.current_position + size
    if not self.current_segment.inrange(end):
        raise Exception('Would read over segment boundaries!')
    start = self.current_position
    self.current_position = end
    return self.current_segment.data[start - base:end - base]
Returns data bytes of size size from the current segment . If size is - 1 it returns all the remaining data bytes from memory segment
208
26
249,627
def read_int(self):
    """Read a little-endian signed integer sized by the architecture:
    8 bytes on AMD64, 4 bytes otherwise."""
    if self.reader.sysinfo.ProcessorArchitecture == PROCESSOR_ARCHITECTURE.AMD64:
        width = 8
    else:
        width = 4
    return int.from_bytes(self.read(width), byteorder='little', signed=True)
Reads an integer . The size depends on the architecture . Reads a 4 byte little - endian signed int on 32 bit arch Reads an 8 byte little - endian signed int on 64 bit arch
85
44
249,628
def read_uint(self):
    """Read a little-endian unsigned integer sized by the architecture:
    8 bytes on AMD64, 4 bytes otherwise."""
    if self.reader.sysinfo.ProcessorArchitecture == PROCESSOR_ARCHITECTURE.AMD64:
        width = 8
    else:
        width = 4
    return int.from_bytes(self.read(width), byteorder='little', signed=False)
Reads an integer . The size depends on the architecture . Reads a 4 byte little - endian unsigned int on 32 bit arch Reads an 8 byte little - endian unsigned int on 64 bit arch
85
44
249,629
def find(self, pattern):
    """Search for *pattern* in the current memory segment.

    :return: the virtual address of the first occurrence, or -1.
    """
    pos = self.current_segment.data.find(pattern)
    if pos == -1:
        return -1
    # BUG FIX: an offset into the segment data must be rebased on the
    # segment's start address (exactly as find_all does), not on the
    # reader's current position, which is unrelated to the match location.
    return pos + self.current_segment.start_address
Searches for a pattern in the current memory segment
40
11
249,630
def find_all(self, pattern):
    """Return the virtual addresses of every occurrence of *pattern* in
    the current memory segment (overlapping matches included)."""
    data = self.current_segment.data
    base = self.current_segment.start_address
    matches = []
    offset = data.find(pattern)
    while offset != -1:
        matches.append(offset + base)
        offset = data.find(pattern, offset + 1)
    return matches
Searches for all occurrences of a pattern in the current memory segment returns all occurrences as a list
76
20
249,631
def find_global(self, pattern):
    """Search the whole process memory space for *pattern* (exhaustive!)
    and return the first occurrence, or -1 when absent."""
    hits = self.reader.search(pattern)
    return hits[0] if hits else -1
Searches for the pattern in the whole process memory space and returns the first occurrence . This is exhaustive!
42
22
249,632
def report_privilege_information():
    """Print every privilege assigned to the current process."""
    privileges = get_privilege_information()
    print("found {0} privileges".format(privileges.count))
    # Plain loop instead of the tuple(map(print, ...)) side-effect idiom.
    for privilege in privileges:
        print(privilege)
Report all privilege information assigned to the current process .
46
10
249,633
async def handle(self):
    """Listen on every configured channel and emit each configured beat
    on its schedule, waiting until all coroutines exit."""
    # One listener coroutine per channel.
    listeners = [asyncio.ensure_future(self.listener(key))
                 for key in self.beat_config]
    # One emitter coroutine per beat configuration.
    emitters = [asyncio.ensure_future(self.emitters(key, value))
                for key, value in self.beat_config.items()]
    # Wait for them all to exit
    await asyncio.wait(emitters)
    await asyncio.wait(listeners)
Listens on all the provided channels and handles the messages .
136
12
249,634
async def emitters(self, key, value):
    """Emit this beat's message onto its channel forever, sleeping the
    configured schedule between sends."""
    while True:
        await asyncio.sleep(value['schedule'].total_seconds())
        payload = {"type": value['type'], "message": value['message']}
        await self.channel_layer.send(key, payload)
Single - channel emitter
71
5
249,635
async def listener(self, channel):
    """Receive messages on *channel* forever, feeding each one into an
    application instance for that channel.

    :raises ValueError: when a message arrives without a "type" key.
    """
    while True:
        message = await self.channel_layer.receive(channel)
        if not message.get("type", None):
            raise ValueError("Worker received message with no type.")
        # Build a channel-scope and route the message into the app queue.
        scope = {"type": "channel", "channel": channel}
        instance_queue = self.get_or_create_application_instance(channel, scope)
        await instance_queue.put(message)
Single - channel listener
117
4
249,636
def rating_count(obj):
    """Count users who submitted a nonzero rating for *obj*."""
    content_type = ContentType.objects.get_for_model(obj)
    ratings = Rating.objects.filter(object_id=obj.pk, content_type=content_type)
    return ratings.exclude(rating=0).count()
Total amount of users who have submitted a positive rating for this object .
57
14
249,637
def set_pixel(self, x, y, value):
    """Set the bicolor-matrix pixel at (x, y) to OFF/GREEN/RED/YELLOW.

    Coordinates outside the 8x8 grid are silently ignored.
    """
    if not (0 <= x <= 7 and 0 <= y <= 7):
        # Ignore out of bounds pixels.
        return
    led = y * 16 + x
    # Green LED is driven by the GREEN bit of value, red by the RED bit.
    self.set_led(led, 1 if value & GREEN > 0 else 0)
    self.set_led(led + 8, 1 if value & RED > 0 else 0)
Set pixel at position x y to the given value . X and Y should be values of 0 to 7 . Value should be OFF GREEN RED or YELLOW .
109
33
249,638
def set_bar(self, bar, value):
    """Set bargraph segment *bar* (0-23) to OFF/GREEN/RED/YELLOW.

    Out-of-range bar numbers are silently ignored.
    """
    if bar < 0 or bar > 23:
        # Ignore out of bounds bars.
        return
    # Compute cathode and anode for this bar position.
    cathode = (bar if bar < 12 else bar - 12) // 4
    anode = bar % 4
    if bar >= 12:
        anode += 4
    # Green LED is the GREEN bit of value, red LED is the RED bit.
    self.set_led(cathode * 16 + anode + 8, 1 if value & GREEN > 0 else 0)
    self.set_led(cathode * 16 + anode, 1 if value & RED > 0 else 0)
Set bar to desired color . Bar should be a value of 0 to 23 and value should be OFF GREEN RED or YELLOW .
137
27
249,639
def animate(self, images, delay=.25):
    """Display each input image in order, pausing *delay* seconds after
    each frame."""
    for frame in images:
        # Render the frame into the buffer, then push it to the hardware.
        self.set_image(frame)
        self.write_display()
        time.sleep(delay)
Displays each of the input images in order pausing for delay seconds after each image .
57
18
249,640
def set_pixel(self, x, y, value):
    """Set the 8x16 matrix pixel at (x, y); a nonzero value lights it.

    X is 0-7 and Y is 0-15; anything outside is silently ignored.
    """
    if 0 <= x <= 7 and 0 <= y <= 15:
        # Rows are mirrored in hardware, hence the (7 - x) remap.
        self.set_led((7 - x) * 16 + y, value)
Set pixel at position x y to the given value . X and Y should be values of 0 to 7 and 0 to 15 resp . Value should be 0 for off and non - zero for on .
57
40
249,641
def set_image(self, image):
    """Set the display buffer from a PIL image.

    The image must be exactly 8x16; it is converted to 1-bit color and
    any nonzero pixel lights the corresponding LED.

    :raises ValueError: when the image is not 8x16.
    """
    imwidth, imheight = image.size
    if imwidth != 8 or imheight != 16:
        raise ValueError('Image must be an 8x16 pixels in size.')
    # Convert image to 1 bit color and grab all the pixels.
    pix = image.convert('1').load()
    # BUG FIX: xrange is Python 2 only and raises NameError on Python 3.
    for x in range(8):
        for y in range(16):
            # Off (0) pixels clear the LED; anything else lights it.
            self.set_pixel(x, y, 0 if pix[(x, y)] == 0 else 1)
Set display buffer to Python Image Library image . Image will be converted to 1 bit color and non - zero color values will light the LEDs .
159
28
249,642
def horizontal_scroll(self, image, padding=True):
    """Return frames that make *image* appear to scroll left-to-right
    across the 8x16 LED matrix when displayed in order.

    With *padding*, extra frames scroll the image in from, and out to,
    a blank display.
    """
    image_list = list()
    width = image.size[0]
    # Scroll into the blank image.
    if padding:
        for x in range(8):
            section = image.crop((0, 0, x, 16))
            display_section = self.create_blank_image()
            # Growing left slice pasted against the right edge.
            display_section.paste(section, (8 - x, 0, 8, 16))
            image_list.append(display_section)
    # Scroll across the input image.
    for x in range(8, width + 1):
        section = image.crop((x - 8, 0, x, 16))
        display_section = self.create_blank_image()
        display_section.paste(section, (0, 0, 8, 16))
        image_list.append(display_section)
    # Scroll out, leaving the blank image.
    if padding:
        for x in range(width - 7, width + 1):
            section = image.crop((x, 0, width, 16))
            display_section = self.create_blank_image()
            # Paste box width 7 - (x - (width - 7)) equals width - x,
            # matching the width of the cropped tail section.
            display_section.paste(section, (0, 0, 7 - (x - (width - 7)), 16))
            image_list.append(display_section)
    # Return the list of images created
    return image_list
Returns a list of images which appear to scroll from left to right across the input image when displayed on the LED matrix in order .
288
26
249,643
def vertical_scroll(self, image, padding=True):
    """Return frames that make *image* appear to scroll top-to-bottom
    down the 8x16 LED matrix when displayed in order.

    With *padding*, extra frames scroll the image in from, and out to,
    a blank display.
    """
    image_list = list()
    height = image.size[1]
    # Scroll into the blank image.
    if padding:
        for y in range(16):
            section = image.crop((0, 0, 8, y))
            display_section = self.create_blank_image()
            display_section.paste(section, (0, 16 - y, 8, 16))
            image_list.append(display_section)
    # Scroll across the input image.
    for y in range(16, height + 1):
        section = image.crop((0, y - 16, 8, y))
        display_section = self.create_blank_image()
        display_section.paste(section, (0, 0, 8, 16))
        image_list.append(display_section)
    # Scroll out, leaving the blank image.
    if padding:
        for y in range(height - 15, height + 1):
            section = image.crop((0, y, 8, height))
            display_section = self.create_blank_image()
            # BUG FIX: the paste box height must be 15 - (y - (height - 15))
            # == height - y so it matches the cropped section's height on
            # this 16-row display; the previous constant 7 was copied from
            # the 8-pixel-wide horizontal version and mismatched the box.
            display_section.paste(section, (0, 0, 8, 15 - (y - (height - 15))))
            image_list.append(display_section)
    # Return the list of images created
    return image_list
Returns a list of images which appear to scroll from top to bottom down the input image when displayed on the LED matrix in order .
288
26
249,644
def print_number_str(self, value, justify_right=True):
    """Print a numeric string (max 4 digits) to the 7-segment display.

    '.' characters are rendered as decimal points attached to the
    preceding digit rather than as characters. Values longer than 4
    digits (ignoring periods) display '----' instead.
    """
    # Calculate length of value without decimals.
    # BUG FIX: str.translate(None, '.') is the Python 2 bytes API and
    # raises TypeError on Python 3; use str.replace instead.
    length = len(value.replace('.', ''))
    # Error if value without decimals is longer than 4 characters.
    if length > 4:
        self.print_str('----')
        return
    # Calculate starting position of digits based on justification.
    pos = (4 - length) if justify_right else 0
    # Go through each character and print it on the display.
    for i, ch in enumerate(value):
        if ch == '.':
            # Print decimal points on the previous digit.
            self.set_decimal(pos - 1, True)
        else:
            self.set_digit(pos, ch)
            pos += 1
Print a 4 character long string of numeric values to the display . This function is similar to print_str but will interpret periods not as characters but as decimal points associated with the previous character .
164
38
249,645
def print_float(self, value, decimal_digits=2, justify_right=True):
    """Print a numeric value with *decimal_digits* digits after the
    decimal point; negative values are shown with a leading minus."""
    text = '{{0:0.{0}F}}'.format(decimal_digits).format(value)
    self.print_number_str(text, justify_right)
Print a numeric value to the display . If value is negative it will be printed with a leading minus sign . Decimal digits is the desired number of digits after the decimal point .
68
36
249,646
def set_left_colon(self, show_colon):
    """Turn the left colon on (True) or off (False).

    Only the large 1.2" 7-segment display has a left colon; both of its
    dots live in buffer position 4 as bits 0x04 and 0x08.
    """
    if show_colon:
        self.buffer[4] |= 0x0C
    else:
        self.buffer[4] &= (~0x0C) & 0xFF
Turn the left colon on with show color True or off with show colon False . Only the large 1 . 2 7 - segment display has a left colon .
82
31
249,647
def print_number_str(self, value, justify_right=True):
    """Print up to four display characters; '.' attaches a decimal point
    to the previous digit. Longer values render as '----'."""
    # Length of the value ignoring decimal points.
    digits = len([ch for ch in value if ch != '.'])
    if digits > 4:
        # Too long to fit: show dashes instead.
        self.print_number_str('----')
        return
    # Starting column depends on justification.
    pos = (4 - digits) if justify_right else 0
    for ch in value:
        if ch == '.':
            # The decimal point belongs to the digit just written.
            self.set_decimal(pos - 1, True)
        else:
            self.set_digit(pos, ch)
            pos += 1
Print a 4 character long string of numeric values to the display . Characters in the string should be any supported character by set_digit or a decimal point . Decimal point characters will be associated with the previous character .
173
43
249,648
def begin(self):
    """Initialize the HT16K33 driver: oscillator on, blinking off,
    full brightness. LEDs stay dark until the buffer is written."""
    # Turn on the oscillator.
    self._device.writeList(HT16K33_SYSTEM_SETUP | HT16K33_OSCILLATOR, [])
    # Turn display on with no blinking.
    self.set_blink(HT16K33_BLINK_OFF)
    # Set display to full brightness.
    self.set_brightness(15)
Initialize driver with LEDs enabled and all turned off .
86
11
249,649
def write_display(self):
    """Flush the in-memory buffer to the display hardware."""
    for address, byte in enumerate(self.buffer):
        self._device.write8(address, byte)
Write display buffer to display hardware .
33
7
249,650
def clear(self):
    """Zero every byte of the display buffer (hardware is untouched
    until write_display is called)."""
    for index in range(len(self.buffer)):
        self.buffer[index] = 0
Clear contents of display buffer .
27
6
249,651
def get_readonly_fields(self, request, obj=None):
    """Add the versioning bookkeeping fields to readonly_fields on the
    change view, so a subclass declaring its own readonly_fields does
    not undo ours."""
    if not obj:
        return self.readonly_fields
    return list(self.readonly_fields) + ['id', 'identity', 'is_current']
This is required so that when a subclass of VersionedAdmin declares its own readonly_fields , ours will not be undone .
54
19
249,652
def get_list_display(self, request):
    """Build the changelist columns, wrapping the base list with the
    versioning columns that are enabled on this admin."""
    # Force cast to list as super get_list_display could return a tuple
    columns = list(super(VersionedAdmin, self).get_list_display(request))
    # Prepend the shortened identity column when requested.
    if self.list_display_show_identity:
        columns.insert(0, 'identity_shortener')
    # Append the optional version date columns.
    if self.list_display_show_start_date:
        columns.append('version_start_date')
    if self.list_display_show_end_date:
        columns.append('version_end_date')
    return columns + ['is_current']
This method determines which fields go in the changelist
168
10
249,653
def get_list_filter(self, request):
    """Add the versioning custom filters to the changelist filters."""
    base_filters = super(VersionedAdmin, self).get_list_filter(request)
    versioning_filters = [('version_start_date', DateTimeFilter), IsCurrentFilter]
    return list(base_filters) + versioning_filters
Adds versionable custom filtering ability to changelist
60
9
249,654
def restore ( self , request , * args , * * kwargs ) : paths = request . path_info . split ( '/' ) object_id_index = paths . index ( "restore" ) - 2 object_id = paths [ object_id_index ] obj = super ( VersionedAdmin , self ) . get_object ( request , object_id ) obj . restore ( ) admin_wordIndex = object_id_index - 3 path = "/%s" % ( "/" . join ( paths [ admin_wordIndex : object_id_index ] ) ) opts = self . model . _meta msg_dict = { 'name' : force_text ( opts . verbose_name ) , 'obj' : format_html ( '<a href="{}">{}</a>' , urlquote ( request . path ) , obj ) , } msg = format_html ( _ ( 'The {name} "{obj}" was restored successfully.' ) , * * msg_dict ) self . message_user ( request , msg , messages . SUCCESS ) return HttpResponseRedirect ( path )
View for restoring object from change view
244
7
249,655
def will_not_clone ( self , request , * args , * * kwargs ) : paths = request . path_info . split ( '/' ) index_of_object_id = paths . index ( "will_not_clone" ) - 1 object_id = paths [ index_of_object_id ] self . change_view ( request , object_id ) admin_wordInUrl = index_of_object_id - 3 # This gets the adminsite for the app, and the model name and joins # together with / path = '/' + '/' . join ( paths [ admin_wordInUrl : index_of_object_id ] ) return HttpResponseRedirect ( path )
Add save but not clone capability in the changeview
153
10
249,656
def exclude ( self ) : exclude = self . VERSIONED_EXCLUDE if super ( VersionedAdmin , self ) . exclude is not None : # Force cast to list as super exclude could return a tuple exclude = list ( super ( VersionedAdmin , self ) . exclude ) + exclude return exclude
Custom descriptor for exclude since there is no get_exclude method to be overridden
64
17
249,657
def get_object ( self , request , object_id , from_field = None ) : # from_field breaks in 1.7.8 obj = super ( VersionedAdmin , self ) . get_object ( request , object_id ) # Only clone if update view as get_object() is also called for change, # delete, and history views if request . method == 'POST' and obj and obj . is_latest and 'will_not_clone' not in request . path and 'delete' not in request . path and 'restore' not in request . path : obj = obj . clone ( ) return obj
our implementation of get_object allows for cloning when updating an object not cloning when the button save but not clone is pushed and at no other time will clone be called
133
33
249,658
def get_urls ( self ) : not_clone_url = [ url ( r'^(.+)/will_not_clone/$' , admin . site . admin_view ( self . will_not_clone ) ) ] restore_url = [ url ( r'^(.+)/restore/$' , admin . site . admin_view ( self . restore ) ) ] return not_clone_url + restore_url + super ( VersionedAdmin , self ) . get_urls ( )
Appends the custom will_not_clone url to the admin site
110
14
249,659
def create_current_version_unique_identity_indexes ( app_name , database = None ) : indexes_created = 0 connection = database_connection ( database ) with connection . cursor ( ) as cursor : for model in versionable_models ( app_name ) : if getattr ( model . _meta , 'managed' , True ) : table_name = model . _meta . db_table index_name = '%s_%s_identity_v_uniq' % ( app_name , table_name ) if not index_exists ( cursor , index_name ) : cursor . execute ( "CREATE UNIQUE INDEX %s ON %s(%s) " "WHERE version_end_date IS NULL" % ( index_name , table_name , 'identity' ) ) indexes_created += 1 return indexes_created
Add partial unique indexes for the the identity column of versionable models .
187
14
249,660
def get_queryset ( self ) : qs = VersionedQuerySet ( self . model , using = self . _db ) if hasattr ( self , 'instance' ) and hasattr ( self . instance , '_querytime' ) : qs . querytime = self . instance . _querytime return qs
Returns a VersionedQuerySet capable of handling version time restrictions .
70
13
249,661
def next_version ( self , object , relations_as_of = 'end' ) : if object . version_end_date is None : next = object else : next = self . filter ( Q ( identity = object . identity ) , Q ( version_start_date__gte = object . version_end_date ) ) . order_by ( 'version_start_date' ) . first ( ) if not next : raise ObjectDoesNotExist ( "next_version couldn't find a next version of object " + str ( object . identity ) ) return self . adjust_version_as_of ( next , relations_as_of )
Return the next version of the given object .
139
9
249,662
def previous_version ( self , object , relations_as_of = 'end' ) : if object . version_birth_date == object . version_start_date : previous = object else : previous = self . filter ( Q ( identity = object . identity ) , Q ( version_end_date__lte = object . version_start_date ) ) . order_by ( '-version_end_date' ) . first ( ) if not previous : raise ObjectDoesNotExist ( "previous_version couldn't find a previous version of " "object " + str ( object . identity ) ) return self . adjust_version_as_of ( previous , relations_as_of )
Return the previous version of the given object .
149
9
249,663
def current_version ( self , object , relations_as_of = None , check_db = False ) : if object . version_end_date is None and not check_db : current = object else : current = self . current . filter ( identity = object . identity ) . first ( ) return self . adjust_version_as_of ( current , relations_as_of )
Return the current version of the given object .
82
9
249,664
def adjust_version_as_of ( version , relations_as_of ) : if not version : return version if relations_as_of == 'end' : if version . is_current : # Ensure that version._querytime is active, in case it wasn't # before. version . as_of = None else : version . as_of = version . version_end_date - datetime . timedelta ( microseconds = 1 ) elif relations_as_of == 'start' : version . as_of = version . version_start_date elif isinstance ( relations_as_of , datetime . datetime ) : as_of = relations_as_of . astimezone ( utc ) if not as_of >= version . version_start_date : raise ValueError ( "Provided as_of '{}' is earlier than version's start " "time '{}'" . format ( as_of . isoformat ( ) , version . version_start_date . isoformat ( ) ) ) if version . version_end_date is not None and as_of >= version . version_end_date : raise ValueError ( "Provided as_of '{}' is later than version's start " "time '{}'" . format ( as_of . isoformat ( ) , version . version_end_date . isoformat ( ) ) ) version . as_of = as_of elif relations_as_of is None : version . _querytime = QueryTime ( time = None , active = False ) else : raise TypeError ( "as_of parameter must be 'start', 'end', None, or datetime " "object" ) return version
Adjusts the passed version s as_of time to an appropriate value and returns it .
365
18
249,665
def _fetch_all ( self ) : if self . _result_cache is None : self . _result_cache = list ( self . iterator ( ) ) # TODO: Do we have to test for ValuesListIterable, ValuesIterable, # and FlatValuesListIterable here? if self . _iterable_class == ModelIterable : for x in self . _result_cache : self . _set_item_querytime ( x ) if self . _prefetch_related_lookups and not self . _prefetch_done : self . _prefetch_related_objects ( )
Completely overrides the QuerySet . _fetch_all method by adding the timestamp to all objects
133
21
249,666
def _clone ( self , * args , * * kwargs ) : clone = super ( VersionedQuerySet , self ) . _clone ( * * kwargs ) clone . querytime = self . querytime return clone
Overrides the QuerySet . _clone method by adding the cloning of the VersionedQuerySet s query_time parameter
48
25
249,667
def _set_item_querytime ( self , item , type_check = True ) : if isinstance ( item , Versionable ) : item . _querytime = self . querytime elif isinstance ( item , VersionedQuerySet ) : item . querytime = self . querytime else : if type_check : raise TypeError ( "This item is not a Versionable, it's a " + str ( type ( item ) ) ) return item
Sets the time for which the query was made on the resulting item
97
14
249,668
def as_of ( self , qtime = None ) : clone = self . _clone ( ) clone . querytime = QueryTime ( time = qtime , active = True ) return clone
Sets the time for which we want to retrieve an object .
40
13
249,669
def delete ( self ) : assert self . query . can_filter ( ) , "Cannot use 'limit' or 'offset' with delete." # Ensure that only current objects are selected. del_query = self . filter ( version_end_date__isnull = True ) # The delete is actually 2 queries - one to find related objects, # and one to delete. Make sure that the discovery of related # objects is performed on the same database as the deletion. del_query . _for_write = True # Disable non-supported fields. del_query . query . select_for_update = False del_query . query . select_related = False del_query . query . clear_ordering ( force_empty = True ) collector_class = get_versioned_delete_collector_class ( ) collector = collector_class ( using = del_query . db ) collector . collect ( del_query ) collector . delete ( get_utc_now ( ) ) # Clear the result cache, in case this QuerySet gets reused. self . _result_cache = None
Deletes the records in the QuerySet .
229
9
249,670
def uuid ( uuid_value = None ) : if uuid_value : if not validate_uuid ( uuid_value ) : raise ValueError ( "uuid_value must be a valid UUID version 4 object" ) else : uuid_value = uuid . uuid4 ( ) if versions_settings . VERSIONS_USE_UUIDFIELD : return uuid_value else : return six . u ( str ( uuid_value ) )
Returns a uuid value that is valid to use for id and identity fields .
100
16
249,671
def restore ( self , * * kwargs ) : if not self . pk : raise ValueError ( 'Instance must be saved and terminated before it can be ' 'restored.' ) if self . is_current : raise ValueError ( 'This is the current version, no need to restore it.' ) if self . get_deferred_fields ( ) : # It would be necessary to fetch the record from the database # again for this to succeed. # Alternatively, perhaps it would be possible to create a copy # of the object after fetching the missing fields. # Doing so may be unexpected by the calling code, so raise an # exception: the calling code should be adapted if necessary. raise ValueError ( 'Can not restore a model instance that has deferred fields' ) cls = self . __class__ now = get_utc_now ( ) restored = copy . copy ( self ) restored . version_end_date = None restored . version_start_date = now fields = [ f for f in cls . _meta . local_fields if f . name not in Versionable . VERSIONABLE_FIELDS ] for field in fields : if field . attname in kwargs : # Fake an object in order to avoid a DB roundtrip # This was made necessary, since assigning to the field's # attname did not work anymore with Django 2.0 obj = field . remote_field . model ( id = kwargs [ field . attname ] ) setattr ( restored , field . name , obj ) elif field . name in kwargs : setattr ( restored , field . name , kwargs [ field . name ] ) elif isinstance ( field , ForeignKey ) : # Set all non-provided ForeignKeys to None. If required, # raise an error. try : setattr ( restored , field . name , None ) # Check for non null foreign key removed since Django 1.10 # https://docs.djangoproject.com/en/1.10/releases/1.10/ # #removed-null-assignment-check-for-non-null-foreign- # key-fields if not field . null : raise ValueError except ValueError : raise ForeignKeyRequiresValueError self . id = self . uuid ( ) with transaction . atomic ( ) : # If this is not the latest version, terminate the latest version latest = cls . objects . 
current_version ( self , check_db = True ) if latest and latest != self : latest . delete ( ) restored . version_start_date = latest . version_end_date self . save ( ) restored . save ( ) # Update ManyToMany relations to point to the old version's id # instead of the restored version's id. for field_name in self . get_all_m2m_field_names ( ) : manager = getattr ( restored , field_name ) # returns a VersionedRelatedManager instance manager . through . objects . filter ( * * { manager . source_field . attname : restored . id } ) . update ( * * { manager . source_field_name : self } ) return restored
Restores this version as a new version and returns this new version .
669
14
249,672
def detach ( self ) : self . id = self . identity = self . uuid ( ) self . version_start_date = self . version_birth_date = get_utc_now ( ) self . version_end_date = None return self
Detaches the instance from its history .
55
8
249,673
def matches_querytime ( instance , querytime ) : if not querytime . active : return True if not querytime . time : return instance . version_end_date is None return ( instance . version_start_date <= querytime . time and ( instance . version_end_date is None or instance . version_end_date > querytime . time ) )
Checks whether the given instance satisfies the given QueryTime object .
78
13
249,674
def contribute_to_related_class ( self , cls , related ) : # Internal FK's - i.e., those with a related name ending with '+' - # and swapped models don't get a related descriptor. super ( VersionedForeignKey , self ) . contribute_to_related_class ( cls , related ) accessor_name = related . get_accessor_name ( ) if hasattr ( cls , accessor_name ) : setattr ( cls , accessor_name , VersionedReverseManyToOneDescriptor ( related ) )
Override ForeignKey s methods and replace the descriptor if set by the parent s methods
126
16
249,675
def get_joining_columns ( self , reverse_join = False ) : source = self . reverse_related_fields if reverse_join else self . related_fields joining_columns = tuple ( ) for lhs_field , rhs_field in source : lhs_col_name = lhs_field . column rhs_col_name = rhs_field . column # Test whether # - self is the current ForeignKey relationship # - self was not auto_created (e.g. is not part of a M2M # relationship) if self is lhs_field and not self . auto_created : if rhs_col_name == Versionable . VERSION_IDENTIFIER_FIELD : rhs_col_name = Versionable . OBJECT_IDENTIFIER_FIELD elif self is rhs_field and not self . auto_created : if lhs_col_name == Versionable . VERSION_IDENTIFIER_FIELD : lhs_col_name = Versionable . OBJECT_IDENTIFIER_FIELD joining_columns = joining_columns + ( ( lhs_col_name , rhs_col_name ) , ) return joining_columns
Get and return joining columns defined by this foreign key relationship
259
11
249,676
def get_versioned_delete_collector_class ( ) : key = 'VERSIONED_DELETE_COLLECTOR' try : cls = _cache [ key ] except KeyError : collector_class_string = getattr ( settings , key ) cls = import_from_string ( collector_class_string , key ) _cache [ key ] = cls return cls
Gets the class to use for deletion collection .
84
10
249,677
def related_objects ( self , related , objs ) : from versions . models import Versionable related_model = related . related_model if issubclass ( related_model , Versionable ) : qs = related_model . objects . current else : qs = related_model . _base_manager . all ( ) return qs . using ( self . using ) . filter ( * * { "%s__in" % related . field . name : objs } )
Gets a QuerySet of current objects related to objs via the relation related .
101
17
249,678
def versionable_delete ( self , instance , timestamp ) : instance . _delete_at ( timestamp , using = self . using )
Soft - deletes the instance setting it s version_end_date to timestamp .
28
17
249,679
def pks_from_objects ( self , objects ) : return { o . pk if isinstance ( o , Model ) else o for o in objects }
Extract all the primary key strings from the given objects . Objects may be Versionables or bare primary keys .
34
22
249,680
def fit ( self , vecs , iter = 20 , seed = 123 ) : assert vecs . dtype == np . float32 assert vecs . ndim == 2 N , D = vecs . shape assert self . Ks < N , "the number of training vector should be more than Ks" assert D % self . M == 0 , "input dimension must be dividable by M" self . Ds = int ( D / self . M ) np . random . seed ( seed ) if self . verbose : print ( "iter: {}, seed: {}" . format ( iter , seed ) ) # [m][ks][ds]: m-th subspace, ks-the codeword, ds-th dim self . codewords = np . zeros ( ( self . M , self . Ks , self . Ds ) , dtype = np . float32 ) for m in range ( self . M ) : if self . verbose : print ( "Training the subspace: {} / {}" . format ( m , self . M ) ) vecs_sub = vecs [ : , m * self . Ds : ( m + 1 ) * self . Ds ] self . codewords [ m ] , _ = kmeans2 ( vecs_sub , self . Ks , iter = iter , minit = 'points' ) return self
Given training vectors run k - means for each sub - space and create codewords for each sub - space .
297
23
249,681
def encode ( self , vecs ) : assert vecs . dtype == np . float32 assert vecs . ndim == 2 N , D = vecs . shape assert D == self . Ds * self . M , "input dimension must be Ds * M" # codes[n][m] : code of n-th vec, m-th subspace codes = np . empty ( ( N , self . M ) , dtype = self . code_dtype ) for m in range ( self . M ) : if self . verbose : print ( "Encoding the subspace: {} / {}" . format ( m , self . M ) ) vecs_sub = vecs [ : , m * self . Ds : ( m + 1 ) * self . Ds ] codes [ : , m ] , _ = vq ( vecs_sub , self . codewords [ m ] ) return codes
Encode input vectors into PQ - codes .
197
10
249,682
def decode ( self , codes ) : assert codes . ndim == 2 N , M = codes . shape assert M == self . M assert codes . dtype == self . code_dtype vecs = np . empty ( ( N , self . Ds * self . M ) , dtype = np . float32 ) for m in range ( self . M ) : vecs [ : , m * self . Ds : ( m + 1 ) * self . Ds ] = self . codewords [ m ] [ codes [ : , m ] , : ] return vecs
Given PQ - codes reconstruct original D - dimensional vectors approximately by fetching the codewords .
123
20
249,683
def transaction ( ) : client = default_client ( ) _thread . client = client . pipeline ( ) try : yield _thread . client . execute ( ) finally : _thread . client = client
Swaps out the current client with a pipeline instance so that each Redis method call inside the context will be pipelined . Once the context is exited we execute the pipeline .
41
36
249,684
def _get_lua_path ( self , name ) : parts = ( os . path . dirname ( os . path . abspath ( __file__ ) ) , "lua" , name ) return os . path . join ( * parts )
Joins the given name with the relative path of the module .
52
13
249,685
def _create_lua_method ( self , name , code ) : script = self . register_script ( code ) setattr ( script , "name" , name ) # Helps debugging redis lib. method = lambda key , * a , * * k : script ( keys = [ key ] , args = a , * * k ) setattr ( self , name , method )
Registers the code snippet as a Lua script and binds the script to the client as a method that can be called with the same signature as regular client methods eg with a single key arg .
81
38
249,686
def value_left ( self , other ) : return other . value if isinstance ( other , self . __class__ ) else other
Returns the value of the other type instance to use in an operator method namely when the method s instance is on the left side of the expression .
28
29
249,687
def value_right ( self , other ) : return self if isinstance ( other , self . __class__ ) else self . value
Returns the value of the type instance calling an to use in an operator method namely when the method s instance is on the right side of the expression .
28
30
249,688
def op_left ( op ) : def method ( self , other ) : return op ( self . value , value_left ( self , other ) ) return method
Returns a type instance method for the given operator applied when the instance appears on the left side of the expression .
34
22
249,689
def op_right ( op ) : def method ( self , other ) : return op ( value_left ( self , other ) , value_right ( self , other ) ) return method
Returns a type instance method for the given operator applied when the instance appears on the right side of the expression .
39
22
249,690
def on ( self , event , f = None ) : def _on ( f ) : self . _add_event_handler ( event , f , f ) return f if f is None : return _on else : return _on ( f )
Registers the function f to the event name event .
52
11
249,691
def once ( self , event , f = None ) : def _wrapper ( f ) : def g ( * args , * * kwargs ) : self . remove_listener ( event , f ) # f may return a coroutine, so we need to return that # result here so that emit can schedule it return f ( * args , * * kwargs ) self . _add_event_handler ( event , f , g ) return f if f is None : return _wrapper else : return _wrapper ( f )
The same as ee . on except that the listener is automatically removed after being called .
111
18
249,692
def remove_all_listeners ( self , event = None ) : if event is not None : self . _events [ event ] = OrderedDict ( ) else : self . _events = defaultdict ( OrderedDict )
Remove all listeners attached to event . If event is None remove all listeners on all events .
50
18
249,693
def offsetcopy ( s , newoffset ) : assert 0 <= newoffset < 8 if not s . bitlength : return copy . copy ( s ) else : if newoffset == s . offset % 8 : return ByteStore ( s . getbyteslice ( s . byteoffset , s . byteoffset + s . bytelength ) , s . bitlength , newoffset ) newdata = [ ] d = s . _rawarray assert newoffset != s . offset % 8 if newoffset < s . offset % 8 : # We need to shift everything left shiftleft = s . offset % 8 - newoffset # First deal with everything except for the final byte for x in range ( s . byteoffset , s . byteoffset + s . bytelength - 1 ) : newdata . append ( ( ( d [ x ] << shiftleft ) & 0xff ) + ( d [ x + 1 ] >> ( 8 - shiftleft ) ) ) bits_in_last_byte = ( s . offset + s . bitlength ) % 8 if not bits_in_last_byte : bits_in_last_byte = 8 if bits_in_last_byte > shiftleft : newdata . append ( ( d [ s . byteoffset + s . bytelength - 1 ] << shiftleft ) & 0xff ) else : # newoffset > s._offset % 8 shiftright = newoffset - s . offset % 8 newdata . append ( s . getbyte ( 0 ) >> shiftright ) for x in range ( s . byteoffset + 1 , s . byteoffset + s . bytelength ) : newdata . append ( ( ( d [ x - 1 ] << ( 8 - shiftright ) ) & 0xff ) + ( d [ x ] >> shiftright ) ) bits_in_last_byte = ( s . offset + s . bitlength ) % 8 if not bits_in_last_byte : bits_in_last_byte = 8 if bits_in_last_byte + shiftright > 8 : newdata . append ( ( d [ s . byteoffset + s . bytelength - 1 ] << ( 8 - shiftright ) ) & 0xff ) new_s = ByteStore ( bytearray ( newdata ) , s . bitlength , newoffset ) assert new_s . offset == newoffset return new_s
Return a copy of a ByteStore with the newoffset .
499
12
249,694
def structparser ( token ) : m = STRUCT_PACK_RE . match ( token ) if not m : return [ token ] else : endian = m . group ( 'endian' ) if endian is None : return [ token ] # Split the format string into a list of 'q', '4h' etc. formatlist = re . findall ( STRUCT_SPLIT_RE , m . group ( 'fmt' ) ) # Now deal with mulitiplicative factors, 4h -> hhhh etc. fmt = '' . join ( [ f [ - 1 ] * int ( f [ : - 1 ] ) if len ( f ) != 1 else f for f in formatlist ] ) if endian == '@' : # Native endianness if byteorder == 'little' : endian = '<' else : assert byteorder == 'big' endian = '>' if endian == '<' : tokens = [ REPLACEMENTS_LE [ c ] for c in fmt ] else : assert endian == '>' tokens = [ REPLACEMENTS_BE [ c ] for c in fmt ] return tokens
Parse struct - like format string token into sub - token list .
248
14
249,695
def tokenparser ( fmt , keys = None , token_cache = { } ) : try : return token_cache [ ( fmt , keys ) ] except KeyError : token_key = ( fmt , keys ) # Very inefficient expanding of brackets. fmt = expand_brackets ( fmt ) # Split tokens by ',' and remove whitespace # The meta_tokens can either be ordinary single tokens or multiple # struct-format token strings. meta_tokens = ( '' . join ( f . split ( ) ) for f in fmt . split ( ',' ) ) return_values = [ ] stretchy_token = False for meta_token in meta_tokens : # See if it has a multiplicative factor m = MULTIPLICATIVE_RE . match ( meta_token ) if not m : factor = 1 else : factor = int ( m . group ( 'factor' ) ) meta_token = m . group ( 'token' ) # See if it's a struct-like format tokens = structparser ( meta_token ) ret_vals = [ ] for token in tokens : if keys and token in keys : # Don't bother parsing it, it's a keyword argument ret_vals . append ( [ token , None , None ] ) continue value = length = None if token == '' : continue # Match literal tokens of the form 0x... 0o... and 0b... m = LITERAL_RE . match ( token ) if m : name = m . group ( 'name' ) value = m . group ( 'value' ) ret_vals . append ( [ name , length , value ] ) continue # Match everything else: m1 = TOKEN_RE . match ( token ) if not m1 : # and if you don't specify a 'name' then the default is 'uint': m2 = DEFAULT_UINT . match ( token ) if not m2 : raise ValueError ( "Don't understand token '{0}'." . format ( token ) ) if m1 : name = m1 . group ( 'name' ) length = m1 . group ( 'len' ) if m1 . group ( 'value' ) : value = m1 . group ( 'value' ) else : assert m2 name = 'uint' length = m2 . group ( 'len' ) if m2 . group ( 'value' ) : value = m2 . group ( 'value' ) if name == 'bool' : if length is not None : raise ValueError ( "You can't specify a length with bool tokens - they are always one bit." 
) length = 1 if length is None and name not in ( 'se' , 'ue' , 'sie' , 'uie' ) : stretchy_token = True if length is not None : # Try converting length to int, otherwise check it's a key. try : length = int ( length ) if length < 0 : raise Error # For the 'bytes' token convert length to bits. if name == 'bytes' : length *= 8 except Error : raise ValueError ( "Can't read a token with a negative length." ) except ValueError : if not keys or length not in keys : raise ValueError ( "Don't understand length '{0}' of token." . format ( length ) ) ret_vals . append ( [ name , length , value ] ) # This multiplies by the multiplicative factor, but this means that # we can't allow keyword values as multipliers (e.g. n*uint:8). # The only way to do this would be to return the factor in some fashion # (we can't use the key's value here as it would mean that we couldn't # sensibly continue to cache the function's results. (TODO). return_values . extend ( ret_vals * factor ) return_values = [ tuple ( x ) for x in return_values ] if len ( token_cache ) < CACHE_SIZE : token_cache [ token_key ] = stretchy_token , return_values return stretchy_token , return_values
Divide the format string into tokens and parse them .
866
11
249,696
def expand_brackets ( s ) : s = '' . join ( s . split ( ) ) while True : start = s . find ( '(' ) if start == - 1 : break count = 1 # Number of hanging open brackets p = start + 1 while p < len ( s ) : if s [ p ] == '(' : count += 1 if s [ p ] == ')' : count -= 1 if not count : break p += 1 if count : raise ValueError ( "Unbalanced parenthesis in '{0}'." . format ( s ) ) if start == 0 or s [ start - 1 ] != '*' : s = s [ 0 : start ] + s [ start + 1 : p ] + s [ p + 1 : ] else : m = BRACKET_RE . search ( s ) if m : factor = int ( m . group ( 'factor' ) ) matchstart = m . start ( 'factor' ) s = s [ 0 : matchstart ] + ( factor - 1 ) * ( s [ start + 1 : p ] + ',' ) + s [ start + 1 : p ] + s [ p + 1 : ] else : raise ValueError ( "Failed to parse '{0}'." . format ( s ) ) return s
Remove whitespace and expand all brackets .
270
8
249,697
def pack ( fmt , * values , * * kwargs ) : tokens = [ ] if isinstance ( fmt , basestring ) : fmt = [ fmt ] try : for f_item in fmt : _ , tkns = tokenparser ( f_item , tuple ( sorted ( kwargs . keys ( ) ) ) ) tokens . extend ( tkns ) except ValueError as e : raise CreationError ( * e . args ) value_iter = iter ( values ) s = BitStream ( ) try : for name , length , value in tokens : # If the value is in the kwd dictionary then it takes precedence. if value in kwargs : value = kwargs [ value ] # If the length is in the kwd dictionary then use that too. if length in kwargs : length = kwargs [ length ] # Also if we just have a dictionary name then we want to use it if name in kwargs and length is None and value is None : s . append ( kwargs [ name ] ) continue if length is not None : length = int ( length ) if value is None and name != 'pad' : # Take the next value from the ones provided value = next ( value_iter ) s . _append ( BitStream . _init_with_token ( name , length , value ) ) except StopIteration : raise CreationError ( "Not enough parameters present to pack according to the " "format. {0} values are needed." , len ( tokens ) ) try : next ( value_iter ) except StopIteration : # Good, we've used up all the *values. return s raise CreationError ( "Too many parameters present to pack according to the format." )
Pack the values according to the format string and return a new BitStream .
363
15
249,698
def getbyteslice ( self , start , end ) : c = self . _rawarray [ start : end ] return c
Direct access to byte data .
27
6
249,699
def _appendstore ( self , store ) : if not store . bitlength : return # Set new array offset to the number of bits in the final byte of current array. store = offsetcopy ( store , ( self . offset + self . bitlength ) % 8 ) if store . offset : # first do the byte with the join. joinval = ( self . _rawarray . pop ( ) & ( 255 ^ ( 255 >> store . offset ) ) | ( store . getbyte ( 0 ) & ( 255 >> store . offset ) ) ) self . _rawarray . append ( joinval ) self . _rawarray . extend ( store . _rawarray [ 1 : ] ) else : self . _rawarray . extend ( store . _rawarray ) self . bitlength += store . bitlength
Join another store on to the end of this one .
168
11