idx
int64
0
252k
question
stringlengths
48
5.28k
target
stringlengths
5
1.23k
250,600
def _set_name(self):
    """Cache the device's human-readable name and serial number.

    Both fall back to ``None`` when the device does not expose them.
    """
    try:
        name, serial = pretty(self.machine_name), self.serial_number
    except AttributeError:
        name = serial = None
    self._name = name
    self._serial = serial
Set device name .
250,601
def to_unit(value, unit='B'):
    """Convert a byte count to the given unit, rounded to PRECISION digits.

    Returns ``(scaled, unit)`` for a known unit; an unknown unit returns
    the (possibly float-coerced) value unchanged.
    """
    units = ['B', 'KB', 'MB', 'GB', 'TB']
    if not isinstance(value, (int, float)):
        value = float(value)
    if unit not in units:
        return value
    scaled = value / 1024 ** units.index(unit)
    return round(scaled, PRECISION), unit
Convert a byte count to the given unit .
250,602
def realtime_stream(self, channel=1, typeno=0, path_file=None):
    """Fetch the camera's realtime stream; optionally dump it to a file.

    Returns the raw response stream either way.
    """
    uri = ('realmonitor.cgi?action=getStream'
           '&channel={0}&subtype={1}').format(channel, typeno)
    ret = self.command(uri)
    if path_file:
        # Persist the stream so it can be replayed later (e.g. mplayer).
        with open(path_file, 'wb') as out_file:
            shutil.copyfileobj(ret.raw, out_file)
    return ret.raw
If the stream is redirected to a file , use the mplayer tool to visualize the video record
250,603
def rtsp_url(self, channelno=None, typeno=None):
    """Build the RTSP streaming URL for the given channel/subtype.

    The port is taken from the device's RTSP configuration when present.
    """
    channelno = 1 if channelno is None else channelno
    typeno = 0 if typeno is None else typeno
    cmd = 'cam/realmonitor?channel={0}&subtype={1}'.format(channelno, typeno)
    port = ''
    for token in self.rtsp_config.split():
        if token.startswith('table.RTSP.Port='):
            port = ':' + token.split('=')[1]
            break
    return 'rtsp://{}:{}@{}{}/{}'.format(
        self._user, self._password, self._host, port, cmd)
Return RTSP streaming url
250,604
def mjpeg_url(self, channelno=None, typeno=None):
    """Build the MJPEG streaming URL for the given channel/subtype."""
    if channelno is None:
        channelno = 0
    if typeno is None:
        typeno = 1
    return '{0}mjpg/video.cgi?channel={1}&subtype={2}'.format(
        self._base_url, channelno, typeno)
Return MJPEG streaming url
250,605
def scan_devices(self, subnet, timeout=None):
    """Spawn a scanner thread per address in *subnet* (CIDR or bare net).

    Returns the list of responding device IPs gathered by the workers.
    """
    max_range = {16: 256, 24: 256, 25: 128, 27: 32,
                 28: 16, 29: 8, 30: 4, 31: 2}
    if "/" in subnet:
        network, mask = subnet.split("/")
        mask = int(mask)
        if mask not in max_range:
            raise RuntimeError("Cannot determine the subnet mask!")
    else:
        # No prefix given: assume a /24.
        network, mask = subnet, 24
    # Drop the host octet; a /16 scans two octets, so drop one more.
    network = network.rpartition(".")[0]
    if mask == 16:
        network = network.rpartition(".")[0]
        addresses = ("{0}.{1}.{2}".format(network, a, b)
                     for a in range(max_range[mask])
                     for b in range(max_range[mask]))
    else:
        addresses = ("{0}.{1}".format(network, a)
                     for a in range(max_range[mask]))
    for ipaddr in addresses:
        threading.Thread(target=self.__raw_scan,
                         args=(ipaddr, timeout)).start()
    return self.amcrest_ips
Scan cameras in a range of ips
250,606
def disallow(self, foreign, permission="active", account=None,
             threshold=None, **kwargs):
    """Remove a foreign public key's or account's authority over *account*.

    :param foreign: public key string or account name to remove
    :param permission: which authority to edit ("owner" or "active")
    :param account: account to modify (falls back to config default_account)
    :param threshold: optionally set a new weight threshold
    :raises ValueError: on bad permission, unknown foreign, or no-op change
    """
    if not account:
        if "default_account" in self.config:
            account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    if permission not in ["owner", "active"]:
        raise ValueError(
            "Permission needs to be either 'owner', or 'active"
        )
    account = Account(account, blockchain_instance=self)
    authority = account[permission]
    try:
        # First interpret *foreign* as a public key and strip it from
        # the key authorities.
        pubkey = PublicKey(foreign, prefix=self.prefix)
        affected_items = list(
            filter(lambda x: x[0] == str(pubkey), authority["key_auths"])
        )
        authority["key_auths"] = list(
            filter(lambda x: x[0] != str(pubkey), authority["key_auths"])
        )
    except:  # noqa: E722 -- fall back to treating *foreign* as an account
        try:
            foreign_account = Account(foreign, blockchain_instance=self)
            affected_items = list(
                filter(
                    lambda x: x[0] == foreign_account["id"],
                    authority["account_auths"],
                )
            )
            authority["account_auths"] = list(
                filter(
                    lambda x: x[0] != foreign_account["id"],
                    authority["account_auths"],
                )
            )
        except:  # noqa: E722
            raise ValueError("Unknown foreign account or unvalid public key")
    if not affected_items:
        raise ValueError("Changes nothing!")
    removed_weight = affected_items[0][1]
    if threshold:
        authority["weight_threshold"] = threshold
    try:
        self._test_weights_treshold(authority)
    except:  # noqa: E722 -- removal under-runs the threshold; lower it
        log.critical(
            "The account's threshold will be reduced by %d" % (removed_weight)
        )
        authority["weight_threshold"] -= removed_weight
        self._test_weights_treshold(authority)
    op = operations.Account_update(
        **{
            "fee": {"amount": 0, "asset_id": "1.3.0"},
            "account": account["id"],
            permission: authority,
            "extensions": {},
        }
    )
    if permission == "owner":
        return self.finalizeOp(op, account["name"], "owner", **kwargs)
    else:
        return self.finalizeOp(op, account["name"], "active", **kwargs)
Remove additional access to an account by some other public key or account .
250,607
def approvewitness(self, witnesses, account=None, **kwargs):
    """Cast *account*'s vote for one or more witnesses."""
    if not account:
        if "default_account" in self.config:
            account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account, blockchain_instance=self)
    options = account["options"]
    if not isinstance(witnesses, (list, set, tuple)):
        witnesses = {witnesses}
    for witness in witnesses:
        options["votes"].append(
            Witness(witness, blockchain_instance=self)["vote_id"])
    options["votes"] = list(set(options["votes"]))
    # Vote ids look like "<type>:<n>"; witness votes are type 1.
    options["num_witness"] = sum(
        1 for v in options["votes"] if float(v.split(":")[0]) == 1
    )
    payload = {
        "fee": {"amount": 0, "asset_id": "1.3.0"},
        "account": account["id"],
        "new_options": options,
        "extensions": {},
        "prefix": self.prefix,
    }
    op = operations.Account_update(**payload)
    return self.finalizeOp(op, account["name"], "active", **kwargs)
Approve a witness
250,608
def approvecommittee(self, committees, account=None, **kwargs):
    """Cast *account*'s vote for one or more committee members."""
    if not account:
        if "default_account" in self.config:
            account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account, blockchain_instance=self)
    options = account["options"]
    if not isinstance(committees, (list, set, tuple)):
        committees = {committees}
    for committee in committees:
        options["votes"].append(
            Committee(committee, blockchain_instance=self)["vote_id"])
    options["votes"] = list(set(options["votes"]))
    # Vote ids look like "<type>:<n>"; committee votes are type 0.
    options["num_committee"] = sum(
        1 for v in options["votes"] if float(v.split(":")[0]) == 0
    )
    payload = {
        "fee": {"amount": 0, "asset_id": "1.3.0"},
        "account": account["id"],
        "new_options": options,
        "extensions": {},
        "prefix": self.prefix,
    }
    op = operations.Account_update(**payload)
    return self.finalizeOp(op, account["name"], "active", **kwargs)
Approve a committee
250,609
def betting_market_rules_create(self, names, descriptions,
                                account=None, **kwargs):
    """Create a new set of betting-market rules.

    *names* and *descriptions* are internationalized lists of
    ``[language, text]`` pairs.
    """
    assert isinstance(names, list)
    assert isinstance(descriptions, list)
    if not account:
        if "default_account" in self.config:
            account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account)
    payload = {
        "fee": {"amount": 0, "asset_id": "1.3.0"},
        "name": names,
        "description": descriptions,
        "prefix": self.prefix,
    }
    op = operations.Betting_market_rules_create(**payload)
    return self.finalizeOp(op, account["name"], "active", **kwargs)
Create betting market rules
250,610
def betting_market_rules_update(self, rules_id, names, descriptions,
                                account=None, **kwargs):
    """Replace the name/description of an existing rules object."""
    assert isinstance(names, list)
    assert isinstance(descriptions, list)
    if not account:
        if "default_account" in self.config:
            account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account)
    rule = Rule(rules_id)
    payload = {
        "fee": {"amount": 0, "asset_id": "1.3.0"},
        "betting_market_rules_id": rule["id"],
        "new_name": names,
        "new_description": descriptions,
        "prefix": self.prefix,
    }
    op = operations.Betting_market_rules_update(**payload)
    return self.finalizeOp(op, account["name"], "active", **kwargs)
Update betting market rules
250,611
def bet_place(self, betting_market_id, amount_to_bet, backer_multiplier,
              back_or_lay, account=None, **kwargs):
    """Place a back or lay bet on a betting market."""
    from . import GRAPHENE_BETTING_ODDS_PRECISION
    assert isinstance(amount_to_bet, Amount)
    assert back_or_lay in ["back", "lay"]
    if not account:
        if "default_account" in self.config:
            account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account)
    bm = BettingMarket(betting_market_id)
    # Odds are stored on chain as a fixed-point integer.
    multiplier = int(backer_multiplier * GRAPHENE_BETTING_ODDS_PRECISION)
    payload = {
        "fee": {"amount": 0, "asset_id": "1.3.0"},
        "bettor_id": account["id"],
        "betting_market_id": bm["id"],
        "amount_to_bet": amount_to_bet.json(),
        "backer_multiplier": multiplier,
        "back_or_lay": back_or_lay,
        "prefix": self.prefix,
    }
    op = operations.Bet_place(**payload)
    return self.finalizeOp(op, account["name"], "active", **kwargs)
Place a bet
250,612
def bet_cancel(self, bet_to_cancel, account=None, **kwargs):
    """Cancel a previously placed (unmatched) bet."""
    if not account:
        if "default_account" in self.config:
            account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account)
    bet = Bet(bet_to_cancel)
    payload = {
        "fee": {"amount": 0, "asset_id": "1.3.0"},
        "bettor_id": account["id"],
        "bet_to_cancel": bet["id"],
        "prefix": self.prefix,
    }
    op = operations.Bet_cancel(**payload)
    return self.finalizeOp(op, account["name"], "active", **kwargs)
Cancel a bet
250,613
def verbose(f):
    """Click decorator: configure logging from ``ctx.obj["verbose"]``.

    Levels 0-4 tune the application logger; >4 additionally attaches the
    handler to the "grapheneapi" logger and >8 to "graphenebase".
    """
    @click.pass_context
    def new_func(ctx, *args, **kwargs):
        # NOTE: deliberately rebinds the module-level ``log`` when the
        # extra loggers are enabled.
        global log
        verbosity = ["critical", "error", "warn", "info", "debug"][
            int(min(ctx.obj.get("verbose", 0), 4))
        ]
        log.setLevel(getattr(logging, verbosity.upper()))
        formatter = logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        )
        ch = logging.StreamHandler()
        ch.setLevel(getattr(logging, verbosity.upper()))
        ch.setFormatter(formatter)
        log.addHandler(ch)
        if ctx.obj.get("verbose", 0) > 4:
            # Verbosity 5-8: also log the RPC layer.
            verbosity = ["critical", "error", "warn", "info", "debug"][
                int(min(ctx.obj.get("verbose", 4) - 4, 4))
            ]
            log = logging.getLogger("grapheneapi")
            log.setLevel(getattr(logging, verbosity.upper()))
            log.addHandler(ch)
        if ctx.obj.get("verbose", 0) > 8:
            # Verbosity 9+: also log the low-level graphenebase layer.
            verbosity = ["critical", "error", "warn", "info", "debug"][
                int(min(ctx.obj.get("verbose", 8) - 8, 4))
            ]
            log = logging.getLogger("graphenebase")
            log.setLevel(getattr(logging, verbosity.upper()))
            log.addHandler(ch)
        return ctx.invoke(f, *args, **kwargs)
    return update_wrapper(new_func, f)
Add verbose flags and add logging handlers
250,614
def offline(f):
    """Decorator exposing ``ctx.peerplays``/``ctx.blockchain`` as an
    offline PeerPlays instance."""
    @click.pass_context
    @verbose
    def new_func(ctx, *args, **kwargs):
        ctx.obj["offline"] = True
        instance = PeerPlays(**ctx.obj)
        ctx.peerplays = instance
        ctx.blockchain = instance
        set_shared_peerplays_instance(instance)
        return ctx.invoke(f, *args, **kwargs)
    return update_wrapper(new_func, f)
This decorator allows you to access ctx . peerplays which is an instance of PeerPlays with offline = True .
250,615
def configfile(f):
    """Decorator that loads the YAML file named by ``ctx.obj["configfile"]``
    into ``ctx.config`` before invoking *f*."""
    @click.pass_context
    def new_func(ctx, *args, **kwargs):
        # safe_load avoids arbitrary object construction from untrusted
        # YAML (yaml.load without an explicit Loader is unsafe and
        # deprecated); the with-block also closes the file handle, which
        # was previously leaked.
        with open(ctx.obj["configfile"]) as fh:
            ctx.config = yaml.safe_load(fh)
        return ctx.invoke(f, *args, **kwargs)
    return update_wrapper(new_func, f)
This decorator will parse a configuration file in YAML format and store the dictionary in ctx . blockchain . config
250,616
def on_message(self, ws, reply, *args):
    """Websocket message hook: decode JSON and dispatch notices/events.

    :raises ValueError: when the node replies with non-JSON data
    """
    log.debug("Received message: %s" % str(reply))
    data = {}
    try:
        data = json.loads(reply, strict=False)
    except ValueError:
        raise ValueError("API node returned invalid format. Expected JSON!")
    if data.get("method") == "notice":
        id = data["params"][0]
        if id >= len(self.__events__):
            log.critical("Received an id that is out of range\n\n" + str(data))
            return
        if id == self.__events__.index("on_object"):
            for notice in data["params"][1]:
                try:
                    if "id" in notice:
                        self.process_notice(notice)
                    else:
                        for obj in notice:
                            if "id" in obj:
                                self.process_notice(obj)
                except Exception as e:
                    # BUG FIX: format_exc must be *called* -- previously
                    # the function object itself was interpolated into
                    # the log message instead of the traceback text.
                    log.critical(
                        "Error in process_notice: {}\n\n{}".format(
                            str(e), traceback.format_exc()
                        )
                    )
        else:
            try:
                callbackname = self.__events__[id]
                log.info("Patching through to call %s" % callbackname)
                [getattr(self.events, callbackname)(x)
                 for x in data["params"][1]]
            except Exception as e:
                log.critical(
                    "Error in {}: {}\n\n{}".format(
                        callbackname, str(e), traceback.format_exc()
                    )
                )
This method is called by the websocket connection on every message that is received . If we receive a notice we hand over post - processing and signalling of events to process_notice .
250,617
def on_close(self, ws):
    """Websocket close hook: stop and join the keep-alive thread."""
    log.debug("Closing WebSocket connection with {}".format(self.url))
    keepalive = self.keepalive
    if keepalive and keepalive.is_alive():
        # Signal the thread's loop to exit, then wait for it.
        keepalive.do_run = False
        keepalive.join()
Called when websocket connection is closed
250,618
def run_forever(self):
    """Keep the websocket app connected: rotate through ``self.urls`` and
    reconnect with a capped linear backoff until ``num_retries`` is
    exhausted (negative num_retries means retry forever)."""
    cnt = 0
    while True:
        cnt += 1
        self.url = next(self.urls)
        log.debug("Trying to connect to node %s" % self.url)
        try:
            # Fresh app per attempt; run_forever() blocks until the
            # connection drops.
            self.ws = websocket.WebSocketApp(
                self.url,
                on_message=self.on_message,
                on_error=self.on_error,
                on_close=self.on_close,
                on_open=self.on_open,
            )
            self.ws.run_forever()
        except websocket.WebSocketException as exc:
            if self.num_retries >= 0 and cnt > self.num_retries:
                raise NumRetriesReached()
            # Linear backoff, capped at 10 seconds.
            sleeptime = (cnt - 1) * 2 if cnt < 10 else 10
            if sleeptime:
                log.warning(
                    "Lost connection to node during wsconnect(): %s (%d/%d) "
                    % (self.url, cnt, self.num_retries)
                    + "Retrying in %d seconds" % sleeptime
                )
                time.sleep(sleeptime)
        except KeyboardInterrupt:
            # Stop reconnecting on Ctrl-C and propagate.
            self.ws.keep_running = False
            raise
        except Exception as e:
            # Unexpected errors are logged but do not kill the loop.
            log.critical("{}\n\n{}".format(str(e), traceback.format_exc()))
This method is used to run the websocket app continuously . It will execute callbacks as defined and try to stay connected with the provided APIs
250,619
def register_dataframe_method(method):
    """Register a function as a method attached to the Pandas DataFrame.

    The accessor class copies the function's docstring (consistent with
    ``register_series_method``) so ``help()`` on the registered attribute
    shows the original documentation.
    """
    def inner(*args, **kwargs):
        class AccessorMethod(object):
            # BUG FIX: previously missing, so the accessor lost the
            # wrapped function's docstring.
            __doc__ = method.__doc__

            def __init__(self, pandas_obj):
                self._obj = pandas_obj

            @wraps(method)
            def __call__(self, *args, **kwargs):
                return method(self._obj, *args, **kwargs)

        register_dataframe_accessor(method.__name__)(AccessorMethod)
        return method

    return inner()
Register a function as a method attached to the Pandas DataFrame .
250,620
def register_series_method(method):
    """Register a function as a method attached to the Pandas Series."""
    def inner(*args, **kwargs):
        class AccessorMethod(object):
            __doc__ = method.__doc__

            def __init__(self, pandas_obj):
                self._obj = pandas_obj

            @wraps(method)
            def __call__(self, *args, **kwargs):
                # Delegate to the wrapped function with the Series first.
                return method(self._obj, *args, **kwargs)

        register_series_accessor(method.__name__)(AccessorMethod)
        return method

    return inner()
Register a function as a method attached to the Pandas Series .
250,621
def add_invites_to_user(cls, user, amount):
    """Grant *amount* extra invites to *user*.

    A no-op when the allocation is -1 (unlimited).
    """
    stat, _ = InvitationStat.objects.get_or_create(user=user)
    if stat.invites_allocated == -1:
        return
    stat.invites_allocated += amount
    stat.save()
Add the specified number of invites to current allocated total .
250,622
def add_invites(cls, amount):
    """Grant *amount* invites to every user."""
    users = get_user_model().objects.all()
    for user in users:
        cls.add_invites_to_user(user, amount)
Add invites for all users .
250,623
def topoff_user(cls, user, amount):
    """Raise *user*'s allocation so at least *amount* invites remain.

    Skips users with unlimited invites (remaining == -1) or enough left.
    """
    stat, _ = cls.objects.get_or_create(user=user)
    remaining = stat.invites_remaining()
    if remaining == -1 or remaining >= amount:
        return
    stat.invites_allocated += amount - remaining
    stat.save()
Ensure user has a minimum number of invites .
250,624
def topoff(cls, amount):
    """Ensure every user has at least *amount* invites remaining."""
    for user in get_user_model().objects.all():
        cls.topoff_user(user, amount)
Ensure all users have a minimum number of invites .
250,625
def align(self, alignment=None):
    """Advance the read cursor to the next *alignment*-byte boundary.

    Defaults to 8 bytes on AMD64, else 4.
    """
    if alignment is None:
        amd64 = (self.reader.sysinfo.ProcessorArchitecture
                 == PROCESSOR_ARCHITECTURE.AMD64)
        alignment = 8 if amd64 else 4
    misalign = self.current_position % alignment
    if misalign:
        # Relative seek (whence=1) by the distance to the boundary.
        self.seek(alignment - misalign, 1)
Repositions the current reader to match architecture alignment
250,626
def peek(self, length):
    """Return *length* bytes at the cursor without advancing it.

    :raises Exception: when the read would cross the segment end
    """
    seg = self.current_segment
    end = self.current_position + length
    if not seg.inrange(end):
        raise Exception('Would read over segment boundaries!')
    lo = self.current_position - seg.start_address
    return seg.data[lo:end - seg.start_address]
Returns up to length bytes from the current memory segment
250,627
def read(self, size=-1):
    """Read *size* bytes from the current segment, advancing the cursor.

    ``size == -1`` reads all remaining bytes (None when nothing is left).
    :raises Exception: on negative sizes below -1 or boundary overruns
    """
    if size < -1:
        raise Exception('You shouldnt be doing this')
    seg = self.current_segment
    start = self.current_position
    if size == -1:
        if not seg.remaining_len(start):
            return None
        self.current_position = seg.end_address
        return seg.data[start - seg.start_address:]
    end = start + size
    if not seg.inrange(end):
        raise Exception('Would read over segment boundaries!')
    self.current_position = end
    return seg.data[start - seg.start_address:end - seg.start_address]
Returns data bytes of size size from the current segment . If size is - 1 it returns all the remaining data bytes from memory segment
250,628
def read_int(self):
    """Read a little-endian signed integer: 8 bytes on AMD64, else 4."""
    width = 8 if (self.reader.sysinfo.ProcessorArchitecture
                  == PROCESSOR_ARCHITECTURE.AMD64) else 4
    return int.from_bytes(self.read(width), byteorder='little', signed=True)
Reads an integer . The size depends on the architecture : a 4 byte little - endian signed int on 32 bit arch , an 8 byte little - endian signed int on 64 bit arch
250,629
def read_uint(self):
    """Read a little-endian unsigned integer: 8 bytes on AMD64, else 4."""
    width = 8 if (self.reader.sysinfo.ProcessorArchitecture
                  == PROCESSOR_ARCHITECTURE.AMD64) else 4
    return int.from_bytes(self.read(width), byteorder='little', signed=False)
Reads an integer . The size depends on the architecture : a 4 byte little - endian unsigned int on 32 bit arch , an 8 byte little - endian unsigned int on 64 bit arch
250,630
def find(self, pattern):
    """Return the address of the first *pattern* match in the current
    memory segment, or -1 when not found.

    BUG FIX: the match offset is relative to the start of the segment's
    data, so it must be rebased on the segment's start address (exactly
    as ``find_all`` does) -- not on the current cursor position, which
    produced wrong addresses whenever the cursor was not at the segment
    start.
    """
    pos = self.current_segment.data.find(pattern)
    if pos == -1:
        return -1
    return pos + self.current_segment.start_address
Searches for a pattern in the current memory segment
250,631
def find_all(self, pattern):
    """Return the addresses of every (possibly overlapping) *pattern*
    match in the current memory segment."""
    seg = self.current_segment
    hits = []
    start = 0
    while True:
        idx = seg.data.find(pattern, start)
        if idx == -1:
            return hits
        hits.append(idx + seg.start_address)
        # Restart one byte later so overlapping matches are found too.
        start = idx + 1
Searches for all occurrences of a pattern in the current memory segment returns all occurrences as a list
250,632
def find_global(self, pattern):
    """Search the whole process memory for *pattern*.

    Returns the first match address, or -1. This is exhaustive!
    """
    matches = self.reader.search(pattern)
    return matches[0] if matches else -1
Searches for the pattern in the whole process memory space and returns the first occurrence . This is exhaustive!
250,633
def report_privilege_information():
    "Report all privilege information assigned to the current process."
    privileges = get_privilege_information()
    print("found {0} privileges".format(privileges.count))
    for privilege in privileges:
        print(privilege)
Report all privilege information assigned to the current process .
250,634
async def handle(self):
    """Spawn one listener and one emitter task per configured channel
    and wait on both groups."""
    listeners = [asyncio.ensure_future(self.listener(channel))
                 for channel in self.beat_config]
    emitters = [asyncio.ensure_future(self.emitters(channel, config))
                for channel, config in self.beat_config.items()]
    await asyncio.wait(emitters)
    await asyncio.wait(listeners)
Listens on all the provided channels and handles the messages .
250,635
async def emitters(self, key, value):
    """Forever send *value*'s message to channel *key* on its schedule."""
    while True:
        await asyncio.sleep(value['schedule'].total_seconds())
        payload = {"type": value['type'], "message": value['message']}
        await self.channel_layer.send(key, payload)
Single - channel emitter
250,636
async def listener(self, channel):
    """Forward every message received on *channel* to its application
    instance queue.

    :raises ValueError: on messages without a "type" key
    """
    while True:
        message = await self.channel_layer.receive(channel)
        if not message.get("type", None):
            raise ValueError("Worker received message with no type.")
        scope = {"type": "channel", "channel": channel}
        queue = self.get_or_create_application_instance(channel, scope)
        await queue.put(message)
Single - channel listener
250,637
def rating_count(obj):
    """Number of users who submitted a non-zero rating for *obj*."""
    content_type = ContentType.objects.get_for_model(obj)
    ratings = Rating.objects.filter(
        object_id=obj.pk,
        content_type=content_type,
    )
    return ratings.exclude(rating=0).count()
Total amount of users who have submitted a positive rating for this object .
250,638
def set_pixel(self, x, y, value):
    """Light pixel (x, y) on the 8x8 bicolor matrix with OFF/GREEN/RED/
    YELLOW; out-of-range coordinates are silently ignored."""
    if not (0 <= x <= 7 and 0 <= y <= 7):
        return
    base = y * 16 + x
    # Green and red anodes sit 8 apart in the buffer.
    self.set_led(base, 1 if value & GREEN else 0)
    self.set_led(base + 8, 1 if value & RED else 0)
Set pixel at position x y to the given value . X and Y should be values of 0 to 7 . Value should be OFF GREEN RED or YELLOW .
250,639
def set_bar(self, bar, value):
    """Set bargraph segment *bar* (0-23) to OFF/GREEN/RED/YELLOW;
    out-of-range segments are silently ignored."""
    if bar < 0 or bar > 23:
        return
    cathode = (bar if bar < 12 else bar - 12) // 4
    anode = bar % 4
    if bar >= 12:
        anode += 4
    # Green anode is offset by 8 from the red anode in the buffer.
    self.set_led(cathode * 16 + anode + 8, 1 if value & GREEN else 0)
    self.set_led(cathode * 16 + anode, 1 if value & RED else 0)
Set bar to desired color . Bar should be a value of 0 to 23 and value should be OFF GREEN RED or YELLOW .
250,640
def animate(self, images, delay=.25):
    """Show each image in sequence, pausing *delay* seconds after each."""
    for frame in images:
        self.set_image(frame)
        self.write_display()
        time.sleep(delay)
Displays each of the input images in order pausing for delay seconds after each image .
250,641
def set_pixel(self, x, y, value):
    """Set pixel (x, y) on the 8x16 matrix (0 off, non-zero on);
    out-of-range coordinates are silently ignored."""
    if 0 <= x <= 7 and 0 <= y <= 15:
        # Rows are mirrored in hardware, hence 7 - x.
        self.set_led((7 - x) * 16 + y, value)
Set pixel at position x y to the given value . X and Y should be values of 0 to 7 and 0 to 15 resp . Value should be 0 for off and non - zero for on .
250,642
def set_image(self, image):
    """Copy a PIL image into the display buffer.

    The image is converted to 1-bit; any non-zero pixel lights the LED.
    :raises ValueError: if the image is not exactly 8x16 pixels
    """
    imwidth, imheight = image.size
    if imwidth != 8 or imheight != 16:
        raise ValueError('Image must be an 8x16 pixels in size.')
    pix = image.convert('1').load()
    # range (not the Python-2-only xrange) keeps this Python 3 compatible.
    for x in range(8):
        for y in range(16):
            self.set_pixel(x, y, 0 if pix[(x, y)] == 0 else 1)
Set display buffer to Python Image Library image . Image will be converted to 1 bit color and non - zero color values will light the LEDs .
250,643
def horizontal_scroll(self, image, padding=True):
    """Build the frame list for scrolling *image* left-to-right across
    the 8x16 display.

    :param image: source PIL image (its width may exceed the display)
    :param padding: when True, adds lead-in/lead-out frames so the image
        scrolls fully on and off the matrix
    :return: list of display-sized frames (feed them to ``animate``)
    """
    image_list = list()
    width = image.size[0]
    if padding:
        # Lead-in: the image enters from the right edge column by column.
        for x in range(8):
            section = image.crop((0, 0, x, 16))
            display_section = self.create_blank_image()
            display_section.paste(section, (8 - x, 0, 8, 16))
            image_list.append(display_section)
    # Main pass: slide an 8-column window across the source image.
    for x in range(8, width + 1):
        section = image.crop((x - 8, 0, x, 16))
        display_section = self.create_blank_image()
        display_section.paste(section, (0, 0, 8, 16))
        image_list.append(display_section)
    if padding:
        # Lead-out: the image exits off the left edge.
        for x in range(width - 7, width + 1):
            section = image.crop((x, 0, width, 16))
            display_section = self.create_blank_image()
            display_section.paste(section, (0, 0, 7 - (x - (width - 7)), 16))
            image_list.append(display_section)
    return image_list
Returns a list of images which appear to scroll from left to right across the input image when displayed on the LED matrix in order .
250,644
def vertical_scroll(self, image, padding=True):
    """Build the frame list for scrolling *image* top-to-bottom down the
    8x16 display.

    :param image: source PIL image (its height may exceed the display)
    :param padding: when True, adds lead-in/lead-out frames so the image
        scrolls fully on and off the matrix
    :return: list of display-sized frames (feed them to ``animate``)
    """
    image_list = list()
    height = image.size[1]
    if padding:
        # Lead-in: the image enters from the bottom edge row by row.
        for y in range(16):
            section = image.crop((0, 0, 8, y))
            display_section = self.create_blank_image()
            display_section.paste(section, (0, 16 - y, 8, 16))
            image_list.append(display_section)
    # Main pass: slide a 16-row window down the source image.
    for y in range(16, height + 1):
        section = image.crop((0, y - 16, 8, y))
        display_section = self.create_blank_image()
        display_section.paste(section, (0, 0, 8, 16))
        image_list.append(display_section)
    if padding:
        # Lead-out: the image exits off the top edge.
        for y in range(height - 15, height + 1):
            section = image.crop((0, y, 8, height))
            display_section = self.create_blank_image()
            display_section.paste(section, (0, 0, 8, 7 - (y - (height - 15))))
            image_list.append(display_section)
    return image_list
Returns a list of images which appear to scroll from top to bottom down the input image when displayed on the LED matrix in order .
250,645
def print_number_str(self, value, justify_right=True):
    """Print up to four digits; '.' characters become decimal points
    attached to the preceding digit. Shows '----' on overflow.
    """
    # Count printable characters, i.e. everything except decimal points.
    # BUG FIX: previously used value.translate(None, '.'), which is a
    # Python-2-only str API and raises TypeError on Python 3.
    length = len(value) - value.count('.')
    if length > 4:
        self.print_str('----')
        return
    pos = (4 - length) if justify_right else 0
    for i, ch in enumerate(value):
        if ch == '.':
            # Attach the point to the previous digit position.
            self.set_decimal(pos - 1, True)
        else:
            self.set_digit(pos, ch)
            pos += 1
Print a 4 character long string of numeric values to the display . This function is similar to print_str but will interpret periods not as characters but as decimal points associated with the previous character .
250,646
def print_float(self, value, decimal_digits=2, justify_right=True):
    """Print a float with *decimal_digits* places after the point.

    Negative values get a leading minus sign.
    """
    text = '{{0:0.{0}F}}'.format(decimal_digits).format(value)
    self.print_number_str(text, justify_right)
Print a numeric value to the display . If value is negative it will be printed with a leading minus sign . Decimal digits is the desired number of digits after the decimal point .
250,647
def set_left_colon(self, show_colon):
    """Turn the left colon on/off (bits 0x04 and 0x08 of buffer[4]).

    Only the large 1.2" 7-segment display has a left colon.
    """
    if show_colon:
        self.buffer[4] |= 0x0C
    else:
        self.buffer[4] &= ~0x0C & 0xFF
Turn the left colon on ( show_colon True ) or off ( show_colon False ) . Only the large 1.2 inch 7 - segment display has a left colon .
250,648
def print_number_str(self, value, justify_right=True):
    """Print a numeric string of up to four digits; '.' marks a decimal
    point on the preceding digit. Overflow prints '----'."""
    digits = [ch for ch in value if ch != '.']
    if len(digits) > 4:
        self.print_number_str('----')
        return
    pos = 4 - len(digits) if justify_right else 0
    for ch in value:
        if ch == '.':
            # Attach the point to the previous digit position.
            self.set_decimal(pos - 1, True)
        else:
            self.set_digit(pos, ch)
            pos += 1
Print a 4 character long string of numeric values to the display . Characters in the string should be any supported character by set_digit or a decimal point . Decimal point characters will be associated with the previous character .
250,649
def begin(self):
    """Initialize driver with LEDs enabled and all turned off."""
    # Writing the setup command with an empty payload turns the
    # oscillator on.
    self._device.writeList(HT16K33_SYSTEM_SETUP | HT16K33_OSCILLATOR, [])
    self.set_blink(HT16K33_BLINK_OFF)
    self.set_brightness(15)
Initialize driver with LEDs enabled and all turned off .
250,650
def write_display(self):
    """Flush the in-memory buffer to the display registers."""
    for address, value in enumerate(self.buffer):
        self._device.write8(address, value)
Write display buffer to display hardware .
250,651
def clear(self):
    """Zero the display buffer in place (hardware is unchanged until
    write_display is called)."""
    self.buffer[:] = [0] * len(self.buffer)
Clear contents of display buffer .
250,652
def get_readonly_fields(self, request, obj=None):
    """On change views, additionally lock id/identity/is_current so a
    subclass's readonly_fields don't undo ours."""
    if not obj:
        return self.readonly_fields
    return list(self.readonly_fields) + ['id', 'identity', 'is_current']
This is required a subclass of VersionedAdmin has readonly_fields ours won t be undone
250,653
def get_list_display(self, request):
    """Assemble changelist columns from the configured show flags."""
    columns = list(super(VersionedAdmin, self).get_list_display(request))
    if self.list_display_show_identity:
        columns.insert(0, 'identity_shortener')
    if self.list_display_show_start_date:
        columns.append('version_start_date')
    if self.list_display_show_end_date:
        columns.append('version_end_date')
    columns.append('is_current')
    return columns
This method determines which fields go in the changelist
250,654
def get_list_filter(self, request):
    """Append version-aware filters to the default changelist filters."""
    filters = list(super(VersionedAdmin, self).get_list_filter(request))
    filters.append(('version_start_date', DateTimeFilter))
    filters.append(IsCurrentFilter)
    return filters
Adds versionable custom filtering ability to changelist
250,655
def restore(self, request, *args, **kwargs):
    """Admin view that restores the object addressed by the
    ``.../<object_id>/<pk>/restore/`` URL, then redirects to the
    changelist with a success message."""
    paths = request.path_info.split('/')
    # The object id sits two path segments before "restore".
    object_id_index = paths.index("restore") - 2
    object_id = paths[object_id_index]
    obj = super(VersionedAdmin, self).get_object(request, object_id)
    obj.restore()
    # Rebuild the changelist URL from the admin path prefix.
    admin_wordIndex = object_id_index - 3
    path = "/%s" % ("/".join(paths[admin_wordIndex:object_id_index]))
    opts = self.model._meta
    msg_dict = {
        'name': force_text(opts.verbose_name),
        'obj': format_html('<a href="{}">{}</a>', urlquote(request.path), obj),
    }
    msg = format_html(_('The {name} "{obj}" was restored successfully.'), **msg_dict)
    self.message_user(request, msg, messages.SUCCESS)
    return HttpResponseRedirect(path)
View for restoring object from change view
250,656
def will_not_clone(self, request, *args, **kwargs):
    """Save the object addressed by ``.../will_not_clone/`` without
    creating a new version, then redirect to the changelist."""
    segments = request.path_info.split('/')
    index_of_object_id = segments.index("will_not_clone") - 1
    object_id = segments[index_of_object_id]
    # Run the regular change view so the normal save path executes.
    self.change_view(request, object_id)
    admin_prefix_index = index_of_object_id - 3
    redirect_to = '/' + '/'.join(
        segments[admin_prefix_index:index_of_object_id])
    return HttpResponseRedirect(redirect_to)
Add save but not clone capability in the changeview
250,657
def exclude(self):
    """Merge VERSIONED_EXCLUDE with any exclude defined on the parent.

    Custom descriptor since there is no get_exclude hook to override.
    """
    inherited = super(VersionedAdmin, self).exclude
    if inherited is None:
        return self.VERSIONED_EXCLUDE
    return list(inherited) + self.VERSIONED_EXCLUDE
Custom descriptor for exclude since there is no get_exclude method to be overridden
250,658
def get_object(self, request, object_id, from_field=None):
    """Fetch the object, cloning it on POSTed edits of the latest
    version -- except for will_not_clone/delete/restore requests."""
    obj = super(VersionedAdmin, self).get_object(request, object_id)
    should_clone = (
        request.method == 'POST'
        and obj
        and obj.is_latest
        and 'will_not_clone' not in request.path
        and 'delete' not in request.path
        and 'restore' not in request.path
    )
    return obj.clone() if should_clone else obj
Our implementation of get_object clones the object when an update is posted , does not clone when the save - but - not - clone button is pushed , and calls clone at no other time
250,659
def get_urls(self):
    """Prepend the custom will_not_clone/restore admin URLs."""
    extra_urls = [
        url(r'^(.+)/will_not_clone/$',
            admin.site.admin_view(self.will_not_clone)),
        url(r'^(.+)/restore/$',
            admin.site.admin_view(self.restore)),
    ]
    return extra_urls + super(VersionedAdmin, self).get_urls()
Appends the custom will_not_clone url to the admin site
250,660
def create_current_version_unique_identity_indexes(app_name, database=None):
    """Create partial unique indexes on ``identity`` for current versions.

    For every managed versionable model of ``app_name``, a partial unique
    index named ``<app>_<table>_identity_v_uniq`` is created on the
    ``identity`` column, restricted to rows whose ``version_end_date`` is
    NULL (the current version of each object).

    :param app_name: Django app label whose models are indexed.
    :param database: optional database alias; default connection when None.
    :return: number of indexes actually created (existing ones are skipped).
    """
    indexes_created = 0
    connection = database_connection(database)
    with connection.cursor() as cursor:
        for model in versionable_models(app_name):
            if getattr(model._meta, 'managed', True):
                table_name = model._meta.db_table
                index_name = '%s_%s_identity_v_uniq' % (app_name, table_name)
                if not index_exists(cursor, index_name):
                    # Identifiers cannot be bound as parameters; the names
                    # come from app/model metadata, not user input.
                    cursor.execute("CREATE UNIQUE INDEX %s ON %s(%s) "
                                   "WHERE version_end_date IS NULL" %
                                   (index_name, table_name, 'identity'))
                    indexes_created += 1
    return indexes_created
Add partial unique indexes for the the identity column of versionable models .
250,661
def get_queryset(self):
    """Build a VersionedQuerySet, propagating any instance query time.

    When this manager hangs off a model instance that carries a
    ``_querytime`` (i.e. it was fetched "as of" some time), that time
    restriction is copied onto the new queryset.
    """
    queryset = VersionedQuerySet(self.model, using=self._db)
    instance = getattr(self, 'instance', None)
    if instance is not None and hasattr(instance, '_querytime'):
        queryset.querytime = instance._querytime
    return queryset
Returns a VersionedQuerySet capable of handling version time restrictions .
250,662
def next_version(self, object, relations_as_of='end'):
    """Return the version that directly follows ``object``.

    If ``object`` is the current (open-ended) version it is returned
    itself.  Otherwise the earliest version whose start date is at or after
    this version's end date is looked up.

    :param object: a Versionable instance.
    :param relations_as_of: forwarded to ``adjust_version_as_of`` to fix
        the time used when traversing relations.
    :raises ObjectDoesNotExist: if no later version exists.
    """
    if object.version_end_date is None:
        next = object
    else:
        next = self.filter(
            Q(identity=object.identity),
            Q(version_start_date__gte=object.version_end_date)
        ).order_by('version_start_date').first()
        if not next:
            raise ObjectDoesNotExist(
                "next_version couldn't find a next version of object " +
                str(object.identity))
    return self.adjust_version_as_of(next, relations_as_of)
Return the next version of the given object .
250,663
def previous_version(self, object, relations_as_of='end'):
    """Return the version that directly precedes ``object``.

    If ``object`` is the very first version (birth date equals start date)
    it is returned itself.  Otherwise the latest version whose end date is
    at or before this version's start date is looked up.

    :raises ObjectDoesNotExist: if no earlier version exists.
    """
    if object.version_birth_date == object.version_start_date:
        previous = object
    else:
        previous = self.filter(
            Q(identity=object.identity),
            Q(version_end_date__lte=object.version_start_date)
        ).order_by('-version_end_date').first()
        if not previous:
            raise ObjectDoesNotExist(
                "previous_version couldn't find a previous version of "
                "object " + str(object.identity))
    return self.adjust_version_as_of(previous, relations_as_of)
Return the previous version of the given object .
250,664
def current_version(self, object, relations_as_of=None, check_db=False):
    """Return the current version of ``object``.

    When ``object`` is already the open-ended version and ``check_db`` is
    False it is returned as-is; otherwise the database is queried for the
    current version sharing the same identity (possibly None).
    """
    if check_db or object.version_end_date is not None:
        current = self.current.filter(identity=object.identity).first()
    else:
        current = object
    return self.adjust_version_as_of(current, relations_as_of)
Return the current version of the given object .
250,665
def adjust_version_as_of(version, relations_as_of):
    """Set ``version.as_of``, the time used to resolve related objects.

    :param version: the Versionable to adjust (None is returned unchanged).
    :param relations_as_of: one of
        - 'end': just before the version's end (None if it is current),
        - 'start': the version's start date,
        - a datetime inside the version's validity period,
        - None: disable time restrictions on relations.
    :raises ValueError: if a datetime falls outside the validity period.
    :raises TypeError: for any other ``relations_as_of`` value.
    """
    if not version:
        return version
    if relations_as_of == 'end':
        if version.is_current:
            version.as_of = None
        else:
            # Last instant at which this version was valid (the end date is
            # exclusive, so step back one microsecond).
            version.as_of = version.version_end_date - \
                datetime.timedelta(microseconds=1)
    elif relations_as_of == 'start':
        version.as_of = version.version_start_date
    elif isinstance(relations_as_of, datetime.datetime):
        as_of = relations_as_of.astimezone(utc)
        if not as_of >= version.version_start_date:
            raise ValueError(
                "Provided as_of '{}' is earlier than version's start "
                "time '{}'".format(
                    as_of.isoformat(),
                    version.version_start_date.isoformat()))
        if version.version_end_date is not None \
                and as_of >= version.version_end_date:
            # Bug fix: this message previously said "start time" although
            # the comparison is against the version's end date.
            raise ValueError(
                "Provided as_of '{}' is later than version's end "
                "time '{}'".format(
                    as_of.isoformat(),
                    version.version_end_date.isoformat()))
        version.as_of = as_of
    elif relations_as_of is None:
        version._querytime = QueryTime(time=None, active=False)
    else:
        raise TypeError(
            "as_of parameter must be 'start', 'end', None, or datetime "
            "object")
    return version
Adjusts the passed version's as_of time to an appropriate value and returns it.
250,666
def _fetch_all(self):
    """Materialize the result cache, stamping objects with the query time.

    Completely overrides ``QuerySet._fetch_all``: once the results are
    materialized, each model instance receives this queryset's querytime so
    subsequent relation traversals are restricted to the same moment.
    """
    if self._result_cache is None:
        self._result_cache = list(self.iterator())
        # Only plain model iteration yields Versionable instances;
        # values()/values_list() use other iterable classes.
        if self._iterable_class == ModelIterable:
            for x in self._result_cache:
                self._set_item_querytime(x)
    if self._prefetch_related_lookups and not self._prefetch_done:
        self._prefetch_related_objects()
Completely overrides the QuerySet . _fetch_all method by adding the timestamp to all objects
250,667
def _clone(self, *args, **kwargs):
    """Clone the queryset, carrying the querytime over to the copy.

    ``QuerySet._clone`` knows nothing about the versioning query time, so
    it is copied onto the clone explicitly.
    """
    # Bug fix: positional arguments were accepted but silently dropped
    # before; forward them to the parent implementation as well.
    clone = super(VersionedQuerySet, self)._clone(*args, **kwargs)
    clone.querytime = self.querytime
    return clone
Overrides the QuerySet._clone method so that the VersionedQuerySet's querytime attribute is copied onto the clone as well.
250,668
def _set_item_querytime(self, item, type_check=True):
    """Stamp ``item`` with this queryset's querytime.

    Versionable instances get ``_querytime``; nested VersionedQuerySets
    get ``querytime``.  Anything else raises TypeError unless
    ``type_check`` is False, in which case it is passed through untouched.
    """
    if isinstance(item, Versionable):
        item._querytime = self.querytime
    elif isinstance(item, VersionedQuerySet):
        item.querytime = self.querytime
    elif type_check:
        raise TypeError("This item is not a Versionable, it's a " +
                        str(type(item)))
    return item
Sets the time for which the query was made on the resulting item
250,669
def as_of(self, qtime=None):
    """Restrict this queryset to the state of the data at ``qtime``.

    Returns a clone whose querytime is active; ``qtime=None`` means the
    current state.
    """
    restricted = self._clone()
    restricted.querytime = QueryTime(time=qtime, active=True)
    return restricted
Sets the time for which we want to retrieve an object .
250,670
def delete(self):
    """Soft-delete all current records matched by this queryset.

    Only rows whose ``version_end_date`` is NULL (the current versions)
    are collected; the configured versioned delete collector then
    terminates them (and their related objects) at a single "now"
    timestamp instead of removing rows.
    """
    assert self.query.can_filter(), \
        "Cannot use 'limit' or 'offset' with delete."
    del_query = self.filter(version_end_date__isnull=True)
    # Route through the write database and strip query state that is
    # irrelevant (or harmful) for a delete.
    del_query._for_write = True
    del_query.query.select_for_update = False
    del_query.query.select_related = False
    del_query.query.clear_ordering(force_empty=True)
    collector_class = get_versioned_delete_collector_class()
    collector = collector_class(using=del_query.db)
    collector.collect(del_query)
    collector.delete(get_utc_now())
    # The cached results are stale after the delete.
    self._result_cache = None
Deletes the records in the QuerySet .
250,671
def uuid(uuid_value=None):
    """Return a value usable for ``id``/``identity`` fields.

    :param uuid_value: optional pre-made value; must validate as a
        version-4 UUID, otherwise ValueError is raised.
    :return: a UUID object when VERSIONS_USE_UUIDFIELD is set, else its
        unicode string form.
    """
    if uuid_value:
        if not validate_uuid(uuid_value):
            raise ValueError(
                "uuid_value must be a valid UUID version 4 object")
    else:
        # ``uuid`` here resolves to the stdlib module in the enclosing
        # scope, not to this function (presumably a static/class method).
        uuid_value = uuid.uuid4()
    if versions_settings.VERSIONS_USE_UUIDFIELD:
        return uuid_value
    else:
        return six.u(str(uuid_value))
Returns a uuid value that is valid to use for id and identity fields .
250,672
def restore(self, **kwargs):
    """Restore this terminated version as the new current version.

    Creates a copy of this version with an open end date, terminates the
    present current version (if any), and re-points many-to-many through
    records.  ForeignKey values may be supplied via ``kwargs`` (by field
    name or attname); FKs not supplied are nulled, which raises
    ForeignKeyRequiresValueError for non-nullable fields.

    :return: the newly created current version.
    :raises ValueError: if this instance is unsaved, already current, or
        has deferred fields.
    """
    if not self.pk:
        raise ValueError(
            'Instance must be saved and terminated before it can be '
            'restored.')
    if self.is_current:
        raise ValueError(
            'This is the current version, no need to restore it.')
    if self.get_deferred_fields():
        raise ValueError(
            'Can not restore a model instance that has deferred fields')
    cls = self.__class__
    now = get_utc_now()
    restored = copy.copy(self)
    restored.version_end_date = None
    restored.version_start_date = now
    # All concrete local fields except the versioning bookkeeping ones.
    fields = [f for f in cls._meta.local_fields
              if f.name not in Versionable.VERSIONABLE_FIELDS]
    for field in fields:
        if field.attname in kwargs:
            # Raw id supplied: wrap it in an unsaved instance of the
            # related model.
            obj = field.remote_field.model(id=kwargs[field.attname])
            setattr(restored, field.name, obj)
        elif field.name in kwargs:
            setattr(restored, field.name, kwargs[field.name])
        elif isinstance(field, ForeignKey):
            # FK not supplied: null it out; non-nullable FKs must be given.
            try:
                setattr(restored, field.name, None)
                if not field.null:
                    raise ValueError
            except ValueError:
                raise ForeignKeyRequiresValueError
    # Assign a fresh id to this (historical) record; ``restored`` was
    # copied above and therefore keeps the original id.
    self.id = self.uuid()
    with transaction.atomic():
        latest = cls.objects.current_version(self, check_db=True)
        if latest and latest != self:
            # Terminate the existing current version and start the
            # restored one exactly where it ended.
            latest.delete()
            restored.version_start_date = latest.version_end_date
        self.save()
        restored.save()
        # Re-point m2m through records that reference the restored id at
        # this (re-keyed) record.
        for field_name in self.get_all_m2m_field_names():
            manager = getattr(restored, field_name)
            manager.through.objects.filter(
                **{manager.source_field.attname: restored.id}
            ).update(**{manager.source_field_name: self})
    return restored
Restores this version as a new version and returns this new version .
250,673
def detach(self):
    """Cut this instance loose from its version history.

    A fresh id/identity is assigned and the version dates are reset as if
    the object had just been created; returns self for chaining.
    """
    fresh_identity = self.uuid()
    birth = get_utc_now()
    self.id = fresh_identity
    self.identity = fresh_identity
    self.version_start_date = birth
    self.version_birth_date = birth
    self.version_end_date = None
    return self
Detaches the instance from its history .
250,674
def matches_querytime(instance, querytime):
    """Tell whether ``instance`` is valid under the given QueryTime.

    An inactive querytime matches everything; an active one without a time
    matches only current versions; otherwise the instance's validity
    period [start, end) must contain the time.
    """
    if not querytime.active:
        return True
    moment = querytime.time
    if not moment:
        return instance.version_end_date is None
    started = instance.version_start_date <= moment
    not_ended = (instance.version_end_date is None or
                 instance.version_end_date > moment)
    return started and not_ended
Checks whether the given instance satisfies the given QueryTime object .
250,675
def contribute_to_related_class(self, cls, related):
    """Install the versioned reverse descriptor on the related class.

    After the stock ForeignKey wiring runs, the reverse accessor (if the
    parent set one) is swapped for a VersionedReverseManyToOneDescriptor
    so reverse lookups honour version time.
    """
    super(VersionedForeignKey, self).contribute_to_related_class(cls,
                                                                 related)
    name = related.get_accessor_name()
    if hasattr(cls, name):
        setattr(cls, name, VersionedReverseManyToOneDescriptor(related))
Overrides ForeignKey's contribute_to_related_class and replaces the reverse descriptor if one was set by the parent's implementation.
250,676
def get_joining_columns(self, reverse_join=False):
    """Return the (lhs, rhs) column pairs used to join on this FK.

    Like ForeignKey's version, except that when this explicitly declared
    field sits on one side of the join and the other side would join on
    the version id column, the identity column is substituted so joins
    span all versions of the target.
    """
    if reverse_join:
        source = self.reverse_related_fields
    else:
        source = self.related_fields
    pairs = []
    for lhs_field, rhs_field in source:
        lhs_col = lhs_field.column
        rhs_col = rhs_field.column
        if self is lhs_field and not self.auto_created:
            if rhs_col == Versionable.VERSION_IDENTIFIER_FIELD:
                rhs_col = Versionable.OBJECT_IDENTIFIER_FIELD
        elif self is rhs_field and not self.auto_created:
            if lhs_col == Versionable.VERSION_IDENTIFIER_FIELD:
                lhs_col = Versionable.OBJECT_IDENTIFIER_FIELD
        pairs.append((lhs_col, rhs_col))
    return tuple(pairs)
Get and return joining columns defined by this foreign key relationship
250,677
def get_versioned_delete_collector_class():
    """Return the collector class used for versioned deletes.

    The dotted path is read from the VERSIONED_DELETE_COLLECTOR setting,
    imported once and memoized in the module-level ``_cache``.
    """
    key = 'VERSIONED_DELETE_COLLECTOR'
    if key not in _cache:
        dotted_path = getattr(settings, key)
        _cache[key] = import_from_string(dotted_path, key)
    return _cache[key]
Gets the class to use for deletion collection .
250,678
def related_objects(self, related, objs):
    """Return current objects related to ``objs`` through ``related``.

    Versionable targets are narrowed to their current versions; plain
    models fall back to the base manager.
    """
    from versions.models import Versionable
    target = related.related_model
    if issubclass(target, Versionable):
        base_qs = target.objects.current
    else:
        base_qs = target._base_manager.all()
    lookup = {"%s__in" % related.field.name: objs}
    return base_qs.using(self.using).filter(**lookup)
Gets a QuerySet of current objects related to objs via the relation related .
250,679
def versionable_delete(self, instance, timestamp):
    """Soft-delete ``instance`` by closing its version at ``timestamp``."""
    instance._delete_at(timestamp, using=self.using)
Soft - deletes the instance setting it s version_end_date to timestamp .
250,680
def pks_from_objects(self, objects):
    """Collect the primary keys of ``objects`` as a set.

    Each element may be a Model instance (its ``pk`` is taken) or already
    a bare primary key (used as-is).
    """
    keys = set()
    for entry in objects:
        if isinstance(entry, Model):
            keys.add(entry.pk)
        else:
            keys.add(entry)
    return keys
Extract all the primary key strings from the given objects . Objects may be Versionables or bare primary keys .
250,681
def fit(self, vecs, iter=20, seed=123):
    """Train codewords: run k-means independently on each sub-space.

    :param vecs: float32 training matrix of shape (N, D); D must be a
        multiple of self.M and N must exceed self.Ks.
    :param iter: number of k-means iterations per sub-space.
    :param seed: numpy RNG seed for reproducible codewords.
    :return: self, so calls can be chained.
    """
    assert vecs.dtype == np.float32
    assert vecs.ndim == 2
    N, D = vecs.shape
    assert self.Ks < N, "the number of training vector should be more than Ks"
    assert D % self.M == 0, "input dimension must be dividable by M"
    self.Ds = int(D / self.M)

    np.random.seed(seed)
    if self.verbose:
        print("iter: {}, seed: {}".format(iter, seed))

    # One codebook of Ks centroids per sub-space.
    self.codewords = np.zeros((self.M, self.Ks, self.Ds), dtype=np.float32)
    for m, sub_vecs in enumerate(np.hsplit(vecs, self.M)):
        if self.verbose:
            print("Training the subspace: {} / {}".format(m, self.M))
        self.codewords[m], _ = kmeans2(sub_vecs, self.Ks, iter=iter,
                                       minit='points')
    return self
Given training vectors run k - means for each sub - space and create codewords for each sub - space .
250,682
def encode(self, vecs):
    """Map each input vector to its PQ code (one centroid id per sub-space).

    :param vecs: float32 matrix of shape (N, Ds * M).
    :return: codes of shape (N, M) with dtype ``self.code_dtype``.
    """
    assert vecs.dtype == np.float32
    assert vecs.ndim == 2
    N, D = vecs.shape
    assert D == self.Ds * self.M, "input dimension must be Ds * M"

    codes = np.empty((N, self.M), dtype=self.code_dtype)
    for m, sub_vecs in enumerate(np.hsplit(vecs, self.M)):
        if self.verbose:
            print("Encoding the subspace: {} / {}".format(m, self.M))
        codes[:, m], _ = vq(sub_vecs, self.codewords[m])
    return codes
Encode input vectors into PQ - codes .
250,683
def decode(self, codes):
    """Approximately reconstruct vectors from PQ codes.

    Each code indexes one centroid per sub-space; the selected centroids
    are concatenated into (Ds * M)-dimensional float32 vectors.
    """
    assert codes.ndim == 2
    N, M = codes.shape
    assert M == self.M
    assert codes.dtype == self.code_dtype

    vecs = np.empty((N, self.Ds * self.M), dtype=np.float32)
    for m in range(self.M):
        lo = m * self.Ds
        vecs[:, lo:lo + self.Ds] = self.codewords[m][codes[:, m], :]
    return vecs
Given PQ - codes reconstruct original D - dimensional vectors approximately by fetching the codewords .
250,684
def transaction():
    """Context manager that pipelines all Redis calls made inside it.

    The thread-local client is swapped for a pipeline; once the with-block
    finishes the queued commands are executed and the original client is
    restored (even on error).
    """
    client = default_client()
    _thread.client = client.pipeline()
    try:
        yield
        # Execute the pipelined commands once the context body completes.
        _thread.client.execute()
    finally:
        _thread.client = client
Swaps out the current client with a pipeline instance so that each Redis method call inside the context will be pipelined . Once the context is exited we execute the pipeline .
250,685
def _get_lua_path ( self , name ) : parts = ( os . path . dirname ( os . path . abspath ( __file__ ) ) , "lua" , name ) return os . path . join ( * parts )
Joins the given name with the relative path of the module .
250,686
def _create_lua_method(self, name, code):
    """Register ``code`` as a Lua script and expose it as a client method.

    The generated method takes a single key followed by arbitrary script
    arguments, mirroring the signature of regular client commands.
    """
    script = self.register_script(code)
    script.name = name

    def method(key, *args, **kwargs):
        return script(keys=[key], args=args, **kwargs)

    setattr(self, name, method)
Registers the code snippet as a Lua script and binds the script to the client as a method that can be called with the same signature as regular client methods eg with a single key arg .
250,687
def value_left(self, other):
    """Unwrap ``other`` for use as an operand.

    If ``other`` is an instance of the same wrapper type as ``self``, its
    raw ``value`` is returned; otherwise ``other`` is assumed to already
    be a raw value and is returned unchanged.
    """
    if isinstance(other, self.__class__):
        return other.value
    return other
Returns the value of the other type instance to use in an operator method namely when the method s instance is on the left side of the expression .
250,688
def value_right(self, other):
    """Pick the representation of ``self`` for the right-hand side.

    When ``other`` is the same wrapper type, ``self`` itself is used;
    otherwise ``self`` is unwrapped to its raw ``value``.
    """
    same_type = isinstance(other, self.__class__)
    return self if same_type else self.value
Returns the value of the type instance calling an to use in an operator method namely when the method s instance is on the right side of the expression .
250,689
def op_left(op):
    """Build an operator method for when the instance is on the left.

    The returned method applies ``op`` to the instance's raw value and the
    (unwrapped) other operand.
    """
    def method(self, other):
        return op(self.value, value_left(self, other))

    return method
Returns a type instance method for the given operator applied when the instance appears on the left side of the expression .
250,690
def op_right(op):
    """Build an operator method for when the instance is on the right.

    The returned method applies ``op`` with the other operand unwrapped on
    the left and the appropriate representation of ``self`` on the right.
    """
    def method(self, other):
        return op(value_left(self, other), value_right(self, other))

    return method
Returns a type instance method for the given operator applied when the instance appears on the right side of the expression .
250,691
def on(self, event, f=None):
    """Register ``f`` as a listener for ``event``.

    Usable directly (``ee.on("x", fn)``) or as a decorator
    (``@ee.on("x")``); the handler itself is returned either way.
    """
    def register(handler):
        self._add_event_handler(event, handler, handler)
        return handler

    return register if f is None else register(f)
Registers the function f to the event name event .
250,692
def once(self, event, f=None):
    """Like ``on`` but the listener is removed after its first call.

    The original function is stored as the registry key, while a wrapper
    that unregisters it before delegating is the actual handler.
    """
    def register(handler):
        def fire_once(*args, **kwargs):
            # Detach before invoking, so the handler runs at most once
            # even if it raises.
            self.remove_listener(event, handler)
            return handler(*args, **kwargs)

        self._add_event_handler(event, handler, fire_once)
        return handler

    if f is not None:
        return register(f)
    return register
The same as ee . on except that the listener is automatically removed after being called .
250,693
def remove_all_listeners(self, event=None):
    """Drop every listener for ``event``, or for all events when None.

    A single event gets a fresh empty OrderedDict; clearing everything
    replaces the whole registry with a new defaultdict.
    """
    if event is None:
        self._events = defaultdict(OrderedDict)
    else:
        self._events[event] = OrderedDict()
Remove all listeners attached to event . If event is None remove all listeners on all events .
250,694
def offsetcopy(s, newoffset):
    """Return a copy of ByteStore ``s`` re-aligned to bit offset ``newoffset``.

    :param s: source ByteStore.
    :param newoffset: target bit offset within the first byte (0..7).
    """
    assert 0 <= newoffset < 8
    if not s.bitlength:
        # Empty store: nothing to shift.
        return copy.copy(s)
    else:
        if newoffset == s.offset % 8:
            # Already at the requested alignment; just slice the bytes out.
            return ByteStore(s.getbyteslice(s.byteoffset,
                                            s.byteoffset + s.bytelength),
                             s.bitlength, newoffset)
        newdata = []
        d = s._rawarray
        assert newoffset != s.offset % 8
        if newoffset < s.offset % 8:
            # Shift all bits left, towards the start of each byte, pulling
            # the top bits of the following byte in from the right.
            shiftleft = s.offset % 8 - newoffset
            for x in range(s.byteoffset, s.byteoffset + s.bytelength - 1):
                newdata.append(((d[x] << shiftleft) & 0xff) +
                               (d[x + 1] >> (8 - shiftleft)))
            bits_in_last_byte = (s.offset + s.bitlength) % 8
            if not bits_in_last_byte:
                bits_in_last_byte = 8
            # Keep a final byte only if it still holds payload bits after
            # the shift.
            if bits_in_last_byte > shiftleft:
                newdata.append((d[s.byteoffset + s.bytelength - 1]
                                << shiftleft) & 0xff)
        else:
            # Shift all bits right, pulling the low bits of the previous
            # byte in from the left.
            shiftright = newoffset - s.offset % 8
            newdata.append(s.getbyte(0) >> shiftright)
            for x in range(s.byteoffset + 1, s.byteoffset + s.bytelength):
                newdata.append(((d[x - 1] << (8 - shiftright)) & 0xff) +
                               (d[x] >> shiftright))
            bits_in_last_byte = (s.offset + s.bitlength) % 8
            if not bits_in_last_byte:
                bits_in_last_byte = 8
            # The shift may push payload bits into one extra trailing byte.
            if bits_in_last_byte + shiftright > 8:
                newdata.append((d[s.byteoffset + s.bytelength - 1]
                                << (8 - shiftright)) & 0xff)
        new_s = ByteStore(bytearray(newdata), s.bitlength, newoffset)
        assert new_s.offset == newoffset
        return new_s
Return a copy of a ByteStore with the newoffset .
250,695
def structparser(token):
    """Expand a struct-module style token into a list of plain tokens.

    Tokens that do not look like struct format strings (or carry no
    endianness prefix) are returned unchanged as a single-element list.
    Otherwise the endianness is resolved and each format character is
    mapped to the equivalent bitstring token.
    """
    m = STRUCT_PACK_RE.match(token)
    if not m:
        return [token]
    else:
        endian = m.group('endian')
        if endian is None:
            return [token]
        # Split the format into items, then expand counts such as '2h'
        # into 'hh'.
        formatlist = re.findall(STRUCT_SPLIT_RE, m.group('fmt'))
        fmt = ''.join([f[-1] * int(f[:-1]) if len(f) != 1 else f
                       for f in formatlist])
        if endian == '@':
            # Native endianness: resolve against the platform byte order.
            if byteorder == 'little':
                endian = '<'
            else:
                assert byteorder == 'big'
                endian = '>'
        if endian == '<':
            tokens = [REPLACEMENTS_LE[c] for c in fmt]
        else:
            assert endian == '>'
            tokens = [REPLACEMENTS_BE[c] for c in fmt]
        return tokens
Parse struct - like format string token into sub - token list .
250,696
def tokenparser(fmt, keys=None, token_cache={}):
    """Divide the format string into tokens and parse each of them.

    Returns ``(stretchy_token, token_list)`` where each token is a
    ``(name, length, value)`` tuple and ``stretchy_token`` is True when
    any token has no fixed length.

    :param fmt: the comma-separated format string.
    :param keys: names that may appear in place of tokens or lengths.
    :param token_cache: deliberate mutable default, used as a per-process
        memoization cache bounded by CACHE_SIZE.
    :raises ValueError: on unparsable tokens or invalid lengths.
    """
    try:
        return token_cache[(fmt, keys)]
    except KeyError:
        token_key = (fmt, keys)
        # Not cached yet: parse from scratch.
        fmt = expand_brackets(fmt)
        meta_tokens = (''.join(f.split()) for f in fmt.split(','))
        return_values = []
        stretchy_token = False
        for meta_token in meta_tokens:
            # Extract a leading multiplier such as '3*...'.
            m = MULTIPLICATIVE_RE.match(meta_token)
            if not m:
                factor = 1
            else:
                factor = int(m.group('factor'))
                meta_token = m.group('token')
            # Expand struct-style compact format strings first.
            tokens = structparser(meta_token)
            ret_vals = []
            for token in tokens:
                if keys and token in keys:
                    # Keyword placeholder; resolved by the caller later.
                    ret_vals.append([token, None, None])
                    continue
                value = length = None
                if token == '':
                    continue
                # Literal tokens such as '0xef' carry their value inline.
                m = LITERAL_RE.match(token)
                if m:
                    name = m.group('name')
                    value = m.group('value')
                    ret_vals.append([name, length, value])
                    continue
                m1 = TOKEN_RE.match(token)
                if not m1:
                    # A bare number defaults to an unsigned integer token.
                    m2 = DEFAULT_UINT.match(token)
                    if not m2:
                        raise ValueError("Don't understand token '{0}'.".format(token))
                if m1:
                    name = m1.group('name')
                    length = m1.group('len')
                    if m1.group('value'):
                        value = m1.group('value')
                else:
                    assert m2
                    name = 'uint'
                    length = m2.group('len')
                    if m2.group('value'):
                        value = m2.group('value')
                if name == 'bool':
                    if length is not None:
                        raise ValueError("You can't specify a length with bool tokens - they are always one bit.")
                    length = 1
                if length is None and name not in ('se', 'ue', 'sie', 'uie'):
                    # No length and not self-delimiting: token is stretchy.
                    stretchy_token = True
                if length is not None:
                    try:
                        length = int(length)
                        if length < 0:
                            raise Error
                        # The 'bytes' token length is given in bytes;
                        # convert it to bits.
                        if name == 'bytes':
                            length *= 8
                    except Error:
                        raise ValueError("Can't read a token with a negative length.")
                    except ValueError:
                        # Length may itself be a keyword placeholder.
                        if not keys or length not in keys:
                            raise ValueError("Don't understand length '{0}' of token.".format(length))
                ret_vals.append([name, length, value])
            return_values.extend(ret_vals * factor)
        return_values = [tuple(x) for x in return_values]
        if len(token_cache) < CACHE_SIZE:
            token_cache[token_key] = stretchy_token, return_values
        return stretchy_token, return_values
Divide the format string into tokens and parse them .
250,697
def expand_brackets(s):
    """Strip whitespace and expand bracketed multipliers in a format string.

    '3*(a, b)' becomes 'a,b,a,b,a,b'; plain parentheses are removed.

    :raises ValueError: on unbalanced parentheses or unparsable input.
    """
    s = ''.join(s.split())
    while True:
        start = s.find('(')
        if start == -1:
            break
        # Find the matching closing parenthesis.
        count = 1
        p = start + 1
        while p < len(s):
            if s[p] == '(':
                count += 1
            if s[p] == ')':
                count -= 1
            if not count:
                break
            p += 1
        if count:
            raise ValueError("Unbalanced parenthesis in '{0}'.".format(s))
        if start == 0 or s[start - 1] != '*':
            # No multiplier: simply drop the brackets.
            s = s[0:start] + s[start + 1:p] + s[p + 1:]
        else:
            m = BRACKET_RE.search(s)
            if m:
                # Repeat the bracketed group 'factor' times, comma-joined.
                factor = int(m.group('factor'))
                matchstart = m.start('factor')
                s = s[0:matchstart] + \
                    (factor - 1) * (s[start + 1:p] + ',') + \
                    s[start + 1:p] + s[p + 1:]
            else:
                raise ValueError("Failed to parse '{0}'.".format(s))
    return s
Remove whitespace and expand all brackets .
250,698
def pack(fmt, *values, **kwargs):
    """Pack ``values`` according to ``fmt`` and return a new BitStream.

    :param fmt: a format string (or list of format strings) of
        comma-separated tokens.
    :param values: positional initialisers consumed by tokens without an
        inline value.
    :param kwargs: named values substituted for matching token names,
        lengths or values.
    :raises CreationError: on bad format strings or a mismatch between the
        number of tokens and supplied values.
    """
    tokens = []
    if isinstance(fmt, basestring):
        fmt = [fmt]
    try:
        for f_item in fmt:
            # Keys are sorted so the token cache key is deterministic.
            _, tkns = tokenparser(f_item, tuple(sorted(kwargs.keys())))
            tokens.extend(tkns)
    except ValueError as e:
        raise CreationError(*e.args)
    value_iter = iter(values)
    s = BitStream()
    try:
        for name, length, value in tokens:
            # Substitute keyword-provided values and lengths.
            if value in kwargs:
                value = kwargs[value]
            if length in kwargs:
                length = kwargs[length]
            # A bare keyword name with no length/value appends directly.
            if name in kwargs and length is None and value is None:
                s.append(kwargs[name])
                continue
            if length is not None:
                length = int(length)
            if value is None and name != 'pad':
                # Consume the next positional value.
                value = next(value_iter)
            s._append(BitStream._init_with_token(name, length, value))
    except StopIteration:
        raise CreationError("Not enough parameters present to pack according to the "
                            "format. {0} values are needed.", len(tokens))
    # Every positional value must have been consumed.
    try:
        next(value_iter)
    except StopIteration:
        return s
    raise CreationError("Too many parameters present to pack according to the format.")
Pack the values according to the format string and return a new BitStream .
250,699
def getbyteslice(self, start, end):
    """Return the raw bytes in [start, end) straight from the backing array."""
    return self._rawarray[start:end]
Direct access to byte data .