idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
227,500
def remove_port_callback ( self , port , cb ) : logger . debug ( 'Removing callback on port [%d] to [%s]' , port , cb ) for port_callback in self . cb : if port_callback . port == port and port_callback . callback == cb : self . cb . remove ( port_callback )
Remove a callback for data that comes on a specific port
79
11
227,501
def add_variable ( self , name , fetch_as = None ) : if fetch_as : self . variables . append ( LogVariable ( name , fetch_as ) ) else : # We cannot determine the default type until we have connected. So # save the name and we will add these once we are connected. self . default_fetch_as . append ( name )
Add a new variable to the configuration .
79
8
227,502
def add_memory ( self , name , fetch_as , stored_as , address ) : self . variables . append ( LogVariable ( name , fetch_as , LogVariable . MEM_TYPE , stored_as , address ) )
Add a raw memory position to log .
49
8
227,503
def create ( self ) : pk = CRTPPacket ( ) pk . set_header ( 5 , CHAN_SETTINGS ) if self . useV2 : pk . data = ( CMD_CREATE_BLOCK_V2 , self . id ) else : pk . data = ( CMD_CREATE_BLOCK , self . id ) for var in self . variables : if ( var . is_toc_variable ( ) is False ) : # Memory location logger . debug ( 'Logging to raw memory %d, 0x%04X' , var . get_storage_and_fetch_byte ( ) , var . address ) pk . data . append ( struct . pack ( '<B' , var . get_storage_and_fetch_byte ( ) ) ) pk . data . append ( struct . pack ( '<I' , var . address ) ) else : # Item in TOC logger . debug ( 'Adding %s with id=%d and type=0x%02X' , var . name , self . cf . log . toc . get_element_id ( var . name ) , var . get_storage_and_fetch_byte ( ) ) pk . data . append ( var . get_storage_and_fetch_byte ( ) ) if self . useV2 : ident = self . cf . log . toc . get_element_id ( var . name ) pk . data . append ( ident & 0x0ff ) pk . data . append ( ( ident >> 8 ) & 0x0ff ) else : pk . data . append ( self . cf . log . toc . get_element_id ( var . name ) ) logger . debug ( 'Adding log block id {}' . format ( self . id ) ) if self . useV2 : self . cf . send_packet ( pk , expected_reply = ( CMD_CREATE_BLOCK_V2 , self . id ) ) else : self . cf . send_packet ( pk , expected_reply = ( CMD_CREATE_BLOCK , self . id ) )
Save the log configuration in the Crazyflie
470
9
227,504
def start ( self ) : if ( self . cf . link is not None ) : if ( self . _added is False ) : self . create ( ) logger . debug ( 'First time block is started, add block' ) else : logger . debug ( 'Block already registered, starting logging' ' for id=%d' , self . id ) pk = CRTPPacket ( ) pk . set_header ( 5 , CHAN_SETTINGS ) pk . data = ( CMD_START_LOGGING , self . id , self . period ) self . cf . send_packet ( pk , expected_reply = ( CMD_START_LOGGING , self . id ) )
Start the logging for this entry
154
6
227,505
def stop ( self ) : if ( self . cf . link is not None ) : if ( self . id is None ) : logger . warning ( 'Stopping block, but no block registered' ) else : logger . debug ( 'Sending stop logging for block id=%d' , self . id ) pk = CRTPPacket ( ) pk . set_header ( 5 , CHAN_SETTINGS ) pk . data = ( CMD_STOP_LOGGING , self . id ) self . cf . send_packet ( pk , expected_reply = ( CMD_STOP_LOGGING , self . id ) )
Stop the logging for this entry
141
6
227,506
def delete ( self ) : if ( self . cf . link is not None ) : if ( self . id is None ) : logger . warning ( 'Delete block, but no block registered' ) else : logger . debug ( 'LogEntry: Sending delete logging for block id=%d' % self . id ) pk = CRTPPacket ( ) pk . set_header ( 5 , CHAN_SETTINGS ) pk . data = ( CMD_DELETE_BLOCK , self . id ) self . cf . send_packet ( pk , expected_reply = ( CMD_DELETE_BLOCK , self . id ) )
Delete this entry in the Crazyflie
142
8
227,507
def unpack_log_data ( self , log_data , timestamp ) : ret_data = { } data_index = 0 for var in self . variables : size = LogTocElement . get_size_from_id ( var . fetch_as ) name = var . name unpackstring = LogTocElement . get_unpack_string_from_id ( var . fetch_as ) value = struct . unpack ( unpackstring , log_data [ data_index : data_index + size ] ) [ 0 ] data_index += size ret_data [ name ] = value self . data_received_cb . call ( timestamp , ret_data , self )
Unpack received logging data so it represent real values according to the configuration in the entry
147
17
227,508
def get_id_from_cstring ( name ) : for key in list ( LogTocElement . types . keys ( ) ) : if ( LogTocElement . types [ key ] [ 0 ] == name ) : return key raise KeyError ( 'Type [%s] not found in LogTocElement.types!' % name )
Return variable type id given the C - storage name
73
10
227,509
def add_config ( self , logconf ) : if not self . cf . link : logger . error ( 'Cannot add configs without being connected to a ' 'Crazyflie!' ) return # If the log configuration contains variables that we added without # type (i.e we want the stored as type for fetching as well) then # resolve this now and add them to the block again. for name in logconf . default_fetch_as : var = self . toc . get_element_by_complete_name ( name ) if not var : logger . warning ( '%s not in TOC, this block cannot be used!' , name ) logconf . valid = False raise KeyError ( 'Variable {} not in TOC' . format ( name ) ) # Now that we know what type this variable has, add it to the log # config again with the correct type logconf . add_variable ( name , var . ctype ) # Now check that all the added variables are in the TOC and that # the total size constraint of a data packet with logging data is # not size = 0 for var in logconf . variables : size += LogTocElement . get_size_from_id ( var . fetch_as ) # Check that we are able to find the variable in the TOC so # we can return error already now and not when the config is sent if var . is_toc_variable ( ) : if ( self . toc . get_element_by_complete_name ( var . name ) is None ) : logger . warning ( 'Log: %s not in TOC, this block cannot be used!' , var . name ) logconf . valid = False raise KeyError ( 'Variable {} not in TOC' . format ( var . name ) ) if ( size <= MAX_LOG_DATA_PACKET_SIZE and ( logconf . period > 0 and logconf . period < 0xFF ) ) : logconf . valid = True logconf . cf = self . cf logconf . id = self . _config_id_counter logconf . useV2 = self . _useV2 self . _config_id_counter = ( self . _config_id_counter + 1 ) % 255 self . log_blocks . append ( logconf ) self . block_added_cb . call ( logconf ) else : logconf . valid = False raise AttributeError ( 'The log configuration is too large or has an invalid ' 'parameter' )
Add a log configuration to the logging framework .
533
9
227,510
def refresh_toc ( self , refresh_done_callback , toc_cache ) : self . _useV2 = self . cf . platform . get_protocol_version ( ) >= 4 self . _toc_cache = toc_cache self . _refresh_callback = refresh_done_callback self . toc = None pk = CRTPPacket ( ) pk . set_header ( CRTPPort . LOGGING , CHAN_SETTINGS ) pk . data = ( CMD_RESET_LOGGING , ) self . cf . send_packet ( pk , expected_reply = ( CMD_RESET_LOGGING , ) )
Start refreshing the table of loggale variables
148
9
227,511
def init_drivers ( enable_debug_driver = False ) : for driver in DRIVERS : try : if driver != DebugDriver or enable_debug_driver : CLASSES . append ( driver ) except Exception : # pylint: disable=W0703 continue
Initialize all the drivers .
55
6
227,512
def scan_interfaces ( address = None ) : available = [ ] found = [ ] for driverClass in CLASSES : try : logger . debug ( 'Scanning: %s' , driverClass ) instance = driverClass ( ) found = instance . scan_interface ( address ) available += found except Exception : raise return available
Scan all the interfaces for available Crazyflies
68
8
227,513
def get_interfaces_status ( ) : status = { } for driverClass in CLASSES : try : instance = driverClass ( ) status [ instance . get_name ( ) ] = instance . get_status ( ) except Exception : raise return status
Get the status of all the interfaces
53
7
227,514
def get_link_driver ( uri , link_quality_callback = None , link_error_callback = None ) : for driverClass in CLASSES : try : instance = driverClass ( ) instance . connect ( uri , link_quality_callback , link_error_callback ) return instance except WrongUriType : continue return None
Return the link driver for the given URI . Returns None if no driver was found for the URI or the URI was not well formatted for the matching driver .
72
31
227,515
def send_short_lpp_packet ( self , dest_id , data ) : pk = CRTPPacket ( ) pk . port = CRTPPort . LOCALIZATION pk . channel = self . GENERIC_CH pk . data = struct . pack ( '<BB' , self . LPS_SHORT_LPP_PACKET , dest_id ) + data self . _cf . send_packet ( pk )
Send ultra - wide - band LPP packet to dest_id
101
13
227,516
def send_velocity_world_setpoint ( self , vx , vy , vz , yawrate ) : pk = CRTPPacket ( ) pk . port = CRTPPort . COMMANDER_GENERIC pk . data = struct . pack ( '<Bffff' , TYPE_VELOCITY_WORLD , vx , vy , vz , yawrate ) self . _cf . send_packet ( pk )
Send Velocity in the world frame of reference setpoint .
101
11
227,517
def send_position_setpoint ( self , x , y , z , yaw ) : pk = CRTPPacket ( ) pk . port = CRTPPort . COMMANDER_GENERIC pk . data = struct . pack ( '<Bffff' , TYPE_POSITION , x , y , z , yaw ) self . _cf . send_packet ( pk )
Control mode where the position is sent as absolute x y z coordinate in meter and the yaw is the absolute orientation .
86
24
227,518
def type_to_string ( t ) : if t == MemoryElement . TYPE_I2C : return 'I2C' if t == MemoryElement . TYPE_1W : return '1-wire' if t == MemoryElement . TYPE_DRIVER_LED : return 'LED driver' if t == MemoryElement . TYPE_LOCO : return 'Loco Positioning' if t == MemoryElement . TYPE_TRAJ : return 'Trajectory' if t == MemoryElement . TYPE_LOCO2 : return 'Loco Positioning 2' return 'Unknown'
Get string representation of memory type
123
6
227,519
def write_data ( self , write_finished_cb ) : self . _write_finished_cb = write_finished_cb data = bytearray ( ) for led in self . leds : # In order to fit all the LEDs in one radio packet RGB565 is used # to compress the colors. The calculations below converts 3 bytes # RGB into 2 bytes RGB565. Then shifts the value of each color to # LSB, applies the intensity and shifts them back for correct # alignment on 2 bytes. R5 = ( ( int ) ( ( ( ( int ( led . r ) & 0xFF ) * 249 + 1014 ) >> 11 ) & 0x1F ) * led . intensity / 100 ) G6 = ( ( int ) ( ( ( ( int ( led . g ) & 0xFF ) * 253 + 505 ) >> 10 ) & 0x3F ) * led . intensity / 100 ) B5 = ( ( int ) ( ( ( ( int ( led . b ) & 0xFF ) * 249 + 1014 ) >> 11 ) & 0x1F ) * led . intensity / 100 ) tmp = ( int ( R5 ) << 11 ) | ( int ( G6 ) << 5 ) | ( int ( B5 ) << 0 ) data += bytearray ( ( tmp >> 8 , tmp & 0xFF ) ) self . mem_handler . write ( self , 0x00 , data , flush_queue = True )
Write the saved LED - ring data to the Crazyflie
310
12
227,520
def _parse_and_check_elements ( self , data ) : crc = data [ - 1 ] test_crc = crc32 ( data [ : - 1 ] ) & 0x0ff elem_data = data [ 2 : - 1 ] if test_crc == crc : while len ( elem_data ) > 0 : ( eid , elen ) = struct . unpack ( 'BB' , elem_data [ : 2 ] ) self . elements [ self . element_mapping [ eid ] ] = elem_data [ 2 : 2 + elen ] . decode ( 'ISO-8859-1' ) elem_data = elem_data [ 2 + elen : ] return True return False
Parse and check the CRC and length of the elements part of the memory
163
15
227,521
def _parse_and_check_header ( self , data ) : ( start , self . pins , self . vid , self . pid , crc ) = struct . unpack ( '<BIBBB' , data ) test_crc = crc32 ( data [ : - 1 ] ) & 0x0ff if start == 0xEB and crc == test_crc : return True return False
Parse and check the CRC of the header part of the memory
89
13
227,522
def update_id_list ( self , update_ids_finished_cb ) : if not self . _update_ids_finished_cb : self . _update_ids_finished_cb = update_ids_finished_cb self . anchor_ids = [ ] self . active_anchor_ids = [ ] self . anchor_data = { } self . nr_of_anchors = 0 self . ids_valid = False self . data_valid = False logger . debug ( 'Updating ids of memory {}' . format ( self . id ) ) # Start reading the header self . mem_handler . read ( self , LocoMemory2 . ADR_ID_LIST , LocoMemory2 . ID_LIST_LEN )
Request an update of the id list
162
7
227,523
def update_active_id_list ( self , update_active_ids_finished_cb ) : if not self . _update_active_ids_finished_cb : self . _update_active_ids_finished_cb = update_active_ids_finished_cb self . active_anchor_ids = [ ] self . active_ids_valid = False logger . debug ( 'Updating active ids of memory {}' . format ( self . id ) ) # Start reading the header self . mem_handler . read ( self , LocoMemory2 . ADR_ACTIVE_ID_LIST , LocoMemory2 . ID_LIST_LEN )
Request an update of the active id list
143
8
227,524
def update_data ( self , update_data_finished_cb ) : if not self . _update_data_finished_cb and self . nr_of_anchors > 0 : self . _update_data_finished_cb = update_data_finished_cb self . anchor_data = { } self . data_valid = False self . _nr_of_anchors_to_fetch = self . nr_of_anchors logger . debug ( 'Updating anchor data of memory {}' . format ( self . id ) ) # Start reading the first anchor self . _currently_fetching_index = 0 self . _request_page ( self . anchor_ids [ self . _currently_fetching_index ] )
Request an update of the anchor data
162
7
227,525
def write_data ( self , write_finished_cb ) : self . _write_finished_cb = write_finished_cb data = bytearray ( ) for poly4D in self . poly4Ds : data += struct . pack ( '<ffffffff' , * poly4D . x . values ) data += struct . pack ( '<ffffffff' , * poly4D . y . values ) data += struct . pack ( '<ffffffff' , * poly4D . z . values ) data += struct . pack ( '<ffffffff' , * poly4D . yaw . values ) data += struct . pack ( '<f' , poly4D . duration ) self . mem_handler . write ( self , 0x00 , data , flush_queue = True )
Write trajectory data to the Crazyflie
171
8
227,526
def get_mem ( self , id ) : for m in self . mems : if m . id == id : return m return None
Fetch the memory with the supplied id
29
8
227,527
def get_mems ( self , type ) : ret = ( ) for m in self . mems : if m . type == type : ret += ( m , ) return ret
Fetch all the memories of the supplied type
38
9
227,528
def write ( self , memory , addr , data , flush_queue = False ) : wreq = _WriteRequest ( memory , addr , data , self . cf ) if memory . id not in self . _write_requests : self . _write_requests [ memory . id ] = [ ] # Workaround until we secure the uplink and change messages for # mems to non-blocking self . _write_requests_lock . acquire ( ) if flush_queue : self . _write_requests [ memory . id ] = self . _write_requests [ memory . id ] [ : 1 ] self . _write_requests [ memory . id ] . insert ( len ( self . _write_requests ) , wreq ) if len ( self . _write_requests [ memory . id ] ) == 1 : wreq . start ( ) self . _write_requests_lock . release ( ) return True
Write the specified data to the given memory at the given address
200
12
227,529
def read ( self , memory , addr , length ) : if memory . id in self . _read_requests : logger . warning ( 'There is already a read operation ongoing for ' 'memory id {}' . format ( memory . id ) ) return False rreq = _ReadRequest ( memory , addr , length , self . cf ) self . _read_requests [ memory . id ] = rreq rreq . start ( ) return True
Read the specified amount of bytes from the given memory at the given address
94
14
227,530
def refresh ( self , refresh_done_callback ) : self . _refresh_callback = refresh_done_callback self . _fetch_id = 0 for m in self . mems : try : self . mem_read_cb . remove_callback ( m . new_data ) m . disconnect ( ) except Exception as e : logger . info ( 'Error when removing memory after update: {}' . format ( e ) ) self . mems = [ ] self . nbr_of_mems = 0 self . _getting_count = False logger . debug ( 'Requesting number of memories' ) pk = CRTPPacket ( ) pk . set_header ( CRTPPort . MEM , CHAN_INFO ) pk . data = ( CMD_INFO_NBR , ) self . cf . send_packet ( pk , expected_reply = ( CMD_INFO_NBR , ) )
Start fetching all the detected memories
198
7
227,531
def reset_to_bootloader1 ( self , cpu_id ) : # Send an echo request and wait for the answer # Mainly aim to bypass a bug of the crazyflie firmware that prevents # reset before normal CRTP communication pk = CRTPPacket ( ) pk . port = CRTPPort . LINKCTRL pk . data = ( 1 , 2 , 3 ) + cpu_id self . link . send_packet ( pk ) pk = None while True : pk = self . link . receive_packet ( 2 ) if not pk : return False if pk . port == CRTPPort . LINKCTRL : break # Send the reset to bootloader request pk = CRTPPacket ( ) pk . set_header ( 0xFF , 0xFF ) pk . data = ( 0xFF , 0xFE ) + cpu_id self . link . send_packet ( pk ) # Wait to ack the reset ... pk = None while True : pk = self . link . receive_packet ( 2 ) if not pk : return False if pk . port == 0xFF and tuple ( pk . data ) == ( 0xFF , 0xFE ) + cpu_id : pk . data = ( 0xFF , 0xF0 ) + cpu_id self . link . send_packet ( pk ) break time . sleep ( 0.1 ) self . link . close ( ) self . link = cflib . crtp . get_link_driver ( self . clink_address ) # time.sleep(0.1) return self . _update_info ( )
Reset to the bootloader The parameter cpuid shall correspond to the device to reset .
358
18
227,532
def reset_to_firmware ( self , target_id ) : # The fake CPU ID is legacy from the Crazyflie 1.0 # In order to reset the CPU id had to be sent, but this # was removed before launching it. But the length check is # still in the bootloader. So to work around this bug so # some extra data needs to be sent. fake_cpu_id = ( 1 , 2 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 ) # Send the reset to bootloader request pk = CRTPPacket ( ) pk . set_header ( 0xFF , 0xFF ) pk . data = ( target_id , 0xFF ) + fake_cpu_id self . link . send_packet ( pk ) # Wait to ack the reset ... pk = None while True : pk = self . link . receive_packet ( 2 ) if not pk : return False if ( pk . header == 0xFF and struct . unpack ( 'B' * len ( pk . data ) , pk . data ) [ : 2 ] == ( target_id , 0xFF ) ) : # Difference in CF1 and CF2 (CPU ID) if target_id == 0xFE : pk . data = ( target_id , 0xF0 , 0x01 ) else : pk . data = ( target_id , 0xF0 ) + fake_cpu_id self . link . send_packet ( pk ) break time . sleep ( 0.1 )
Reset to firmware The parameter cpuid shall correspond to the device to reset .
342
16
227,533
def check_link_and_get_info ( self , target_id = 0xFF ) : for _ in range ( 0 , 5 ) : if self . _update_info ( target_id ) : if self . _in_boot_cb : self . _in_boot_cb . call ( True , self . targets [ target_id ] . protocol_version ) if self . _info_cb : self . _info_cb . call ( self . targets [ target_id ] ) return True return False
Try to get a connection with the bootloader by requesting info 5 times . This let roughly 10 seconds to boot the copter ...
111
26
227,534
def _update_info ( self , target_id ) : # Call getInfo ... pk = CRTPPacket ( ) pk . set_header ( 0xFF , 0xFF ) pk . data = ( target_id , 0x10 ) self . link . send_packet ( pk ) # Wait for the answer pk = self . link . receive_packet ( 2 ) if ( pk and pk . header == 0xFF and struct . unpack ( '<BB' , pk . data [ 0 : 2 ] ) == ( target_id , 0x10 ) ) : tab = struct . unpack ( 'BBHHHH' , pk . data [ 0 : 10 ] ) cpuid = struct . unpack ( 'B' * 12 , pk . data [ 10 : 22 ] ) if target_id not in self . targets : self . targets [ target_id ] = Target ( target_id ) self . targets [ target_id ] . addr = target_id if len ( pk . data ) > 22 : self . targets [ target_id ] . protocol_version = pk . datat [ 22 ] self . protocol_version = pk . datat [ 22 ] self . targets [ target_id ] . page_size = tab [ 2 ] self . targets [ target_id ] . buffer_pages = tab [ 3 ] self . targets [ target_id ] . flash_pages = tab [ 4 ] self . targets [ target_id ] . start_page = tab [ 5 ] self . targets [ target_id ] . cpuid = '%02X' % cpuid [ 0 ] for i in cpuid [ 1 : ] : self . targets [ target_id ] . cpuid += ':%02X' % i if ( self . protocol_version == 0x10 and target_id == TargetTypes . STM32 ) : self . _update_mapping ( target_id ) return True return False
Call the command getInfo and fill up the information received in the fields of the object
424
17
227,535
def upload_buffer ( self , target_id , page , address , buff ) : # print len(buff) count = 0 pk = CRTPPacket ( ) pk . set_header ( 0xFF , 0xFF ) pk . data = struct . pack ( '=BBHH' , target_id , 0x14 , page , address ) for i in range ( 0 , len ( buff ) ) : pk . data . append ( buff [ i ] ) count += 1 if count > 24 : self . link . send_packet ( pk ) count = 0 pk = CRTPPacket ( ) pk . set_header ( 0xFF , 0xFF ) pk . data = struct . pack ( '=BBHH' , target_id , 0x14 , page , i + address + 1 ) self . link . send_packet ( pk )
Upload data into a buffer on the Crazyflie
191
10
227,536
def read_flash ( self , addr = 0xFF , page = 0x00 ) : buff = bytearray ( ) page_size = self . targets [ addr ] . page_size for i in range ( 0 , int ( math . ceil ( page_size / 25.0 ) ) ) : pk = None retry_counter = 5 while ( ( not pk or pk . header != 0xFF or struct . unpack ( '<BB' , pk . data [ 0 : 2 ] ) != ( addr , 0x1C ) ) and retry_counter >= 0 ) : pk = CRTPPacket ( ) pk . set_header ( 0xFF , 0xFF ) pk . data = struct . pack ( '<BBHH' , addr , 0x1C , page , ( i * 25 ) ) self . link . send_packet ( pk ) pk = self . link . receive_packet ( 1 ) retry_counter -= 1 if ( retry_counter < 0 ) : return None else : buff += pk . data [ 6 : ] # For some reason we get one byte extra here... return buff [ 0 : page_size ]
Read back a flash page from the Crazyflie and return it
261
13
227,537
def write_flash ( self , addr , page_buffer , target_page , page_count ) : # print "Write page", flashPage # print "Writing page [%d] and [%d] forward" % (flashPage, nPage) pk = None # Flushing downlink ... pk = self . link . receive_packet ( 0 ) while pk is not None : pk = self . link . receive_packet ( 0 ) retry_counter = 5 # print "Flasing to 0x{:X}".format(addr) while ( ( not pk or pk . header != 0xFF or struct . unpack ( '<BB' , pk . data [ 0 : 2 ] ) != ( addr , 0x18 ) ) and retry_counter >= 0 ) : pk = CRTPPacket ( ) pk . set_header ( 0xFF , 0xFF ) pk . data = struct . pack ( '<BBHHH' , addr , 0x18 , page_buffer , target_page , page_count ) self . link . send_packet ( pk ) pk = self . link . receive_packet ( 1 ) retry_counter -= 1 if retry_counter < 0 : self . error_code = - 1 return False self . error_code = pk . data [ 3 ] return pk . data [ 2 ] == 1
Initiate flashing of data in the buffer to flash .
306
12
227,538
def decode_cpu_id ( self , cpuid ) : ret = ( ) for i in cpuid . split ( ':' ) : ret += ( eval ( '0x' + i ) , ) return ret
Decode the CPU id into a string
45
8
227,539
def set_header ( self , port , channel ) : self . _port = port self . channel = channel self . _update_header ( )
Set the port and channel for this packet .
31
9
227,540
def _set_data ( self , data ) : if type ( data ) == bytearray : self . _data = data elif type ( data ) == str : if sys . version_info < ( 3 , ) : self . _data = bytearray ( data ) else : self . _data = bytearray ( data . encode ( 'ISO-8859-1' ) ) elif type ( data ) == list or type ( data ) == tuple : self . _data = bytearray ( data ) elif sys . version_info >= ( 3 , ) and type ( data ) == bytes : self . _data = bytearray ( data ) else : raise Exception ( 'Data must be bytearray, string, list or tuple,' ' not {}' . format ( type ( data ) ) )
Set the packet data
180
4
227,541
def take_off ( self , height = None , velocity = VELOCITY ) : if self . _is_flying : raise Exception ( 'Already flying' ) if not self . _cf . is_connected ( ) : raise Exception ( 'Crazyflie is not connected' ) self . _is_flying = True self . _reset_position_estimator ( ) self . _thread = _SetPointThread ( self . _cf ) self . _thread . start ( ) if height is None : height = self . default_height self . up ( height , velocity )
Takes off that is starts the motors goes straigt up and hovers . Do not call this function if you use the with keyword . Take off is done automatically when the context is created .
124
40
227,542
def turn_left ( self , angle_degrees , rate = RATE ) : flight_time = angle_degrees / rate self . start_turn_left ( rate ) time . sleep ( flight_time ) self . stop ( )
Turn to the left staying on the spot
51
8
227,543
def turn_right ( self , angle_degrees , rate = RATE ) : flight_time = angle_degrees / rate self . start_turn_right ( rate ) time . sleep ( flight_time ) self . stop ( )
Turn to the right staying on the spot
51
8
227,544
def circle_left ( self , radius_m , velocity = VELOCITY , angle_degrees = 360.0 ) : distance = 2 * radius_m * math . pi * angle_degrees / 360.0 flight_time = distance / velocity self . start_circle_left ( radius_m , velocity ) time . sleep ( flight_time ) self . stop ( )
Go in circle counter clock wise
82
6
227,545
def circle_right ( self , radius_m , velocity = VELOCITY , angle_degrees = 360.0 ) : distance = 2 * radius_m * math . pi * angle_degrees / 360.0 flight_time = distance / velocity self . start_circle_right ( radius_m , velocity ) time . sleep ( flight_time ) self . stop ( )
Go in circle clock wise
82
5
227,546
def start_circle_left ( self , radius_m , velocity = VELOCITY ) : circumference = 2 * radius_m * math . pi rate = 360.0 * velocity / circumference self . _set_vel_setpoint ( velocity , 0.0 , 0.0 , - rate )
Start a circular motion to the left . This function returns immediately .
64
13
227,547
def start_linear_motion ( self , velocity_x_m , velocity_y_m , velocity_z_m ) : self . _set_vel_setpoint ( velocity_x_m , velocity_y_m , velocity_z_m , 0.0 )
Start a linear motion . This function returns immediately .
60
10
227,548
def set_vel_setpoint ( self , velocity_x , velocity_y , velocity_z , rate_yaw ) : self . _queue . put ( ( velocity_x , velocity_y , velocity_z , rate_yaw ) )
Set the velocity setpoint to use for the future motion
54
11
227,549
def _param_callback ( self , name , value ) : print ( '{0}: {1}' . format ( name , value ) ) # Remove each parameter from the list and close the link when # all are fetched self . _param_check_list . remove ( name ) if len ( self . _param_check_list ) == 0 : print ( 'Have fetched all parameter values.' ) # First remove all the group callbacks for g in self . _param_groups : self . _cf . param . remove_update_callback ( group = g , cb = self . _param_callback ) # Create a new random value [0.00,1.00] for pid_attitude.pitch_kd # and set it pkd = random . random ( ) print ( '' ) print ( 'Write: pid_attitude.pitch_kd={:.2f}' . format ( pkd ) ) self . _cf . param . add_update_callback ( group = 'pid_attitude' , name = 'pitch_kd' , cb = self . _a_pitch_kd_callback ) # When setting a value the parameter is automatically read back # and the registered callbacks will get the updated value self . _cf . param . set_value ( 'pid_attitude.pitch_kd' , '{:.2f}' . format ( pkd ) )
Generic callback registered for all the groups
315
7
227,550
def _a_pitch_kd_callback ( self , name , value ) : print ( 'Readback: {0}={1}' . format ( name , value ) ) # End the example by closing the link (will cause the app to quit) self . _cf . close_link ( )
Callback for pid_attitude . pitch_kd
66
11
227,551
def _scan_radio_channels ( self , cradio , start = 0 , stop = 125 ) : return list ( cradio . scan_channels ( start , stop , ( 0xff , ) ) )
Scan for Crazyflies between the supplied channels .
47
9
227,552
def main ( client_secrets , scope , save , credentials , headless ) : flow = google_auth_oauthlib . flow . InstalledAppFlow . from_client_secrets_file ( client_secrets , scopes = scope ) if not headless : creds = flow . run_local_server ( ) else : creds = flow . run_console ( ) creds_data = { 'token' : creds . token , 'refresh_token' : creds . refresh_token , 'token_uri' : creds . token_uri , 'client_id' : creds . client_id , 'client_secret' : creds . client_secret , 'scopes' : creds . scopes } if save : del creds_data [ 'token' ] config_path = os . path . dirname ( credentials ) if config_path and not os . path . isdir ( config_path ) : os . makedirs ( config_path ) with open ( credentials , 'w' ) as outfile : json . dump ( creds_data , outfile ) click . echo ( 'credentials saved: %s' % credentials ) else : click . echo ( json . dumps ( creds_data ) )
Command - line tool for obtaining authorization and credentials from a user .
273
13
227,553
def authorization_url ( self , * * kwargs ) : kwargs . setdefault ( 'access_type' , 'offline' ) url , state = self . oauth2session . authorization_url ( self . client_config [ 'auth_uri' ] , * * kwargs ) return url , state
Generates an authorization URL .
70
6
227,554
def fetch_token ( self , * * kwargs ) : kwargs . setdefault ( 'client_secret' , self . client_config [ 'client_secret' ] ) return self . oauth2session . fetch_token ( self . client_config [ 'token_uri' ] , * * kwargs )
Completes the Authorization Flow and obtains an access token .
71
13
227,555
def run_console ( self , authorization_prompt_message = _DEFAULT_AUTH_PROMPT_MESSAGE , authorization_code_message = _DEFAULT_AUTH_CODE_MESSAGE , * * kwargs ) : kwargs . setdefault ( 'prompt' , 'consent' ) self . redirect_uri = self . _OOB_REDIRECT_URI auth_url , _ = self . authorization_url ( * * kwargs ) print ( authorization_prompt_message . format ( url = auth_url ) ) code = input ( authorization_code_message ) self . fetch_token ( code = code ) return self . credentials
Run the flow using the console strategy .
152
8
227,556
def run_local_server ( self , host = 'localhost' , port = 8080 , authorization_prompt_message = _DEFAULT_AUTH_PROMPT_MESSAGE , success_message = _DEFAULT_WEB_SUCCESS_MESSAGE , open_browser = True , * * kwargs ) : self . redirect_uri = 'http://{}:{}/' . format ( host , port ) auth_url , _ = self . authorization_url ( * * kwargs ) wsgi_app = _RedirectWSGIApp ( success_message ) local_server = wsgiref . simple_server . make_server ( host , port , wsgi_app , handler_class = _WSGIRequestHandler ) if open_browser : webbrowser . open ( auth_url , new = 1 , autoraise = True ) print ( authorization_prompt_message . format ( url = auth_url ) ) local_server . handle_request ( ) # Note: using https here because oauthlib is very picky that # OAuth 2.0 should only occur over https. authorization_response = wsgi_app . last_request_uri . replace ( 'http' , 'https' ) self . fetch_token ( authorization_response = authorization_response ) return self . credentials
Run the flow using the server strategy .
293
8
227,557
def install ( self , opener ) : # type: (Union[Type[Opener], Opener, Callable[[], Opener]]) -> None _opener = opener if isinstance ( opener , Opener ) else opener ( ) assert isinstance ( _opener , Opener ) , "Opener instance required" assert _opener . protocols , "must list one or more protocols" for protocol in _opener . protocols : self . _protocols [ protocol ] = _opener return opener
Install an opener .
107
4
227,558
def get_opener ( self , protocol ) : # type: (Text) -> Opener protocol = protocol or self . default_opener if self . load_extern : entry_point = next ( pkg_resources . iter_entry_points ( "fs.opener" , protocol ) , None ) else : entry_point = None # If not entry point was loaded from the extensions, try looking # into the registered protocols if entry_point is None : if protocol in self . _protocols : opener_instance = self . _protocols [ protocol ] else : raise UnsupportedProtocol ( "protocol '{}' is not supported" . format ( protocol ) ) # If an entry point was found in an extension, attempt to load it else : try : opener = entry_point . load ( ) except Exception as exception : raise EntryPointError ( "could not load entry point; {}" . format ( exception ) ) if not issubclass ( opener , Opener ) : raise EntryPointError ( "entry point did not return an opener" ) try : opener_instance = opener ( ) except Exception as exception : raise EntryPointError ( "could not instantiate opener; {}" . format ( exception ) ) return opener_instance
Get the opener class associated to a given protocol .
265
10
227,559
def open ( self , fs_url , # type: Text writeable = True , # type: bool create = False , # type: bool cwd = "." , # type: Text default_protocol = "osfs" , # type: Text ) : # type: (...) -> Tuple[FS, Text] if "://" not in fs_url : # URL may just be a path fs_url = "{}://{}" . format ( default_protocol , fs_url ) parse_result = parse_fs_url ( fs_url ) protocol = parse_result . protocol open_path = parse_result . path opener = self . get_opener ( protocol ) open_fs = opener . open_fs ( fs_url , parse_result , writeable , create , cwd ) return open_fs , open_path
Open a filesystem from a FS URL .
182
8
227,560
def manage_fs ( self , fs_url , # type: Union[FS, Text] create = False , # type: bool writeable = False , # type: bool cwd = "." , # type: Text ) : # type: (...) -> Iterator[FS] from . . base import FS if isinstance ( fs_url , FS ) : yield fs_url else : _fs = self . open_fs ( fs_url , create = create , writeable = writeable , cwd = cwd ) try : yield _fs except : raise finally : _fs . close ( )
Get a context manager to open and close a filesystem .
128
11
227,561
def copy_fs ( src_fs , # type: Union[FS, Text] dst_fs , # type: Union[FS, Text] walker = None , # type: Optional[Walker] on_copy = None , # type: Optional[_OnCopy] workers = 0 , # type: int ) : # type: (...) -> None return copy_dir ( src_fs , "/" , dst_fs , "/" , walker = walker , on_copy = on_copy , workers = workers )
Copy the contents of one filesystem to another .
111
9
227,562
def copy_fs_if_newer ( src_fs , # type: Union[FS, Text] dst_fs , # type: Union[FS, Text] walker = None , # type: Optional[Walker] on_copy = None , # type: Optional[_OnCopy] workers = 0 , # type: int ) : # type: (...) -> None return copy_dir_if_newer ( src_fs , "/" , dst_fs , "/" , walker = walker , on_copy = on_copy , workers = workers )
Copy the contents of one filesystem to another checking times .
121
11
227,563
def _source_is_newer ( src_fs , src_path , dst_fs , dst_path ) : # type: (FS, Text, FS, Text) -> bool try : if dst_fs . exists ( dst_path ) : namespace = ( "details" , "modified" ) src_modified = src_fs . getinfo ( src_path , namespace ) . modified if src_modified is not None : dst_modified = dst_fs . getinfo ( dst_path , namespace ) . modified return dst_modified is None or src_modified > dst_modified return True except FSError : # pragma: no cover # todo: should log something here return True
Determine if source file is newer than destination file .
149
12
227,564
def copy_file ( src_fs , # type: Union[FS, Text] src_path , # type: Text dst_fs , # type: Union[FS, Text] dst_path , # type: Text ) : # type: (...) -> None with manage_fs ( src_fs , writeable = False ) as _src_fs : with manage_fs ( dst_fs , create = True ) as _dst_fs : if _src_fs is _dst_fs : # Same filesystem, so we can do a potentially optimized # copy _src_fs . copy ( src_path , dst_path , overwrite = True ) else : # Standard copy with _src_fs . lock ( ) , _dst_fs . lock ( ) : if _dst_fs . hassyspath ( dst_path ) : with _dst_fs . openbin ( dst_path , "w" ) as write_file : _src_fs . download ( src_path , write_file ) else : with _src_fs . openbin ( src_path ) as read_file : _dst_fs . upload ( dst_path , read_file )
Copy a file from one filesystem to another .
255
9
227,565
def copy_file_internal ( src_fs , # type: FS src_path , # type: Text dst_fs , # type: FS dst_path , # type: Text ) : # type: (...) -> None if src_fs is dst_fs : # Same filesystem, so we can do a potentially optimized # copy src_fs . copy ( src_path , dst_path , overwrite = True ) elif dst_fs . hassyspath ( dst_path ) : with dst_fs . openbin ( dst_path , "w" ) as write_file : src_fs . download ( src_path , write_file ) else : with src_fs . openbin ( src_path ) as read_file : dst_fs . upload ( dst_path , read_file )
Low level copy that doesn t call manage_fs or lock .
171
13
227,566
def copy_file_if_newer ( src_fs , # type: Union[FS, Text] src_path , # type: Text dst_fs , # type: Union[FS, Text] dst_path , # type: Text ) : # type: (...) -> bool with manage_fs ( src_fs , writeable = False ) as _src_fs : with manage_fs ( dst_fs , create = True ) as _dst_fs : if _src_fs is _dst_fs : # Same filesystem, so we can do a potentially optimized # copy if _source_is_newer ( _src_fs , src_path , _dst_fs , dst_path ) : _src_fs . copy ( src_path , dst_path , overwrite = True ) return True else : return False else : # Standard copy with _src_fs . lock ( ) , _dst_fs . lock ( ) : if _source_is_newer ( _src_fs , src_path , _dst_fs , dst_path ) : copy_file_internal ( _src_fs , src_path , _dst_fs , dst_path ) return True else : return False
Copy a file from one filesystem to another checking times .
265
11
227,567
def copy_dir ( src_fs , # type: Union[FS, Text] src_path , # type: Text dst_fs , # type: Union[FS, Text] dst_path , # type: Text walker = None , # type: Optional[Walker] on_copy = None , # type: Optional[_OnCopy] workers = 0 , # type: int ) : # type: (...) -> None on_copy = on_copy or ( lambda * args : None ) walker = walker or Walker ( ) _src_path = abspath ( normpath ( src_path ) ) _dst_path = abspath ( normpath ( dst_path ) ) def src ( ) : return manage_fs ( src_fs , writeable = False ) def dst ( ) : return manage_fs ( dst_fs , create = True ) from . _bulk import Copier with src ( ) as _src_fs , dst ( ) as _dst_fs : with _src_fs . lock ( ) , _dst_fs . lock ( ) : _thread_safe = is_thread_safe ( _src_fs , _dst_fs ) with Copier ( num_workers = workers if _thread_safe else 0 ) as copier : _dst_fs . makedir ( _dst_path , recreate = True ) for dir_path , dirs , files in walker . walk ( _src_fs , _src_path ) : copy_path = combine ( _dst_path , frombase ( _src_path , dir_path ) ) for info in dirs : _dst_fs . makedir ( info . make_path ( copy_path ) , recreate = True ) for info in files : src_path = info . make_path ( dir_path ) dst_path = info . make_path ( copy_path ) copier . copy ( _src_fs , src_path , _dst_fs , dst_path ) on_copy ( _src_fs , src_path , _dst_fs , dst_path )
Copy a directory from one filesystem to another .
457
9
227,568
def copy_dir_if_newer ( src_fs , # type: Union[FS, Text] src_path , # type: Text dst_fs , # type: Union[FS, Text] dst_path , # type: Text walker = None , # type: Optional[Walker] on_copy = None , # type: Optional[_OnCopy] workers = 0 , # type: int ) : # type: (...) -> None on_copy = on_copy or ( lambda * args : None ) walker = walker or Walker ( ) _src_path = abspath ( normpath ( src_path ) ) _dst_path = abspath ( normpath ( dst_path ) ) def src ( ) : return manage_fs ( src_fs , writeable = False ) def dst ( ) : return manage_fs ( dst_fs , create = True ) from . _bulk import Copier with src ( ) as _src_fs , dst ( ) as _dst_fs : with _src_fs . lock ( ) , _dst_fs . lock ( ) : _thread_safe = is_thread_safe ( _src_fs , _dst_fs ) with Copier ( num_workers = workers if _thread_safe else 0 ) as copier : _dst_fs . makedir ( _dst_path , recreate = True ) namespace = ( "details" , "modified" ) dst_state = { path : info for path , info in walker . info ( _dst_fs , _dst_path , namespace ) if info . is_file } src_state = [ ( path , info ) for path , info in walker . info ( _src_fs , _src_path , namespace ) ] for dir_path , copy_info in src_state : copy_path = combine ( _dst_path , frombase ( _src_path , dir_path ) ) if copy_info . is_dir : _dst_fs . makedir ( copy_path , recreate = True ) elif copy_info . is_file : # dst file is present, try to figure out if copy # is necessary try : src_modified = copy_info . modified dst_modified = dst_state [ dir_path ] . modified except KeyError : do_copy = True else : do_copy = ( src_modified is None or dst_modified is None or src_modified > dst_modified ) if do_copy : copier . copy ( _src_fs , dir_path , _dst_fs , copy_path ) on_copy ( _src_fs , dir_path , _dst_fs , copy_path )
Copy a directory from one filesystem to another checking times .
586
11
227,569
def _parse_ftp_error ( error ) : # type: (ftplib.Error) -> Tuple[Text, Text] code , _ , message = text_type ( error ) . partition ( " " ) return code , message
Extract code and message from ftp error .
52
10
227,570
def _open_ftp ( self ) : # type: () -> FTP ftp = self . fs . _open_ftp ( ) ftp . voidcmd ( str ( "TYPE I" ) ) return ftp
Open an ftp object for the file .
47
9
227,571
def _parse_features ( cls , feat_response ) : # type: (Text) -> Dict[Text, Text] features = { } if feat_response . split ( "-" ) [ 0 ] == "211" : for line in feat_response . splitlines ( ) : if line . startswith ( " " ) : key , _ , value = line [ 1 : ] . partition ( " " ) features [ key ] = value return features
Parse a dict of features from FTP feat response .
98
11
227,572
def _open_ftp ( self ) : # type: () -> FTP _ftp = FTP ( ) _ftp . set_debuglevel ( 0 ) with ftp_errors ( self ) : _ftp . connect ( self . host , self . port , self . timeout ) _ftp . login ( self . user , self . passwd , self . acct ) self . _features = { } try : feat_response = _decode ( _ftp . sendcmd ( "FEAT" ) , "latin-1" ) except error_perm : # pragma: no cover self . encoding = "latin-1" else : self . _features = self . _parse_features ( feat_response ) self . encoding = "utf-8" if "UTF8" in self . _features else "latin-1" if not PY2 : _ftp . file = _ftp . sock . makefile ( # type: ignore "r" , encoding = self . encoding ) _ftp . encoding = self . encoding self . _welcome = _ftp . welcome return _ftp
Open a new ftp object .
242
7
227,573
def ftp_url ( self ) : # type: () -> Text url = ( "ftp://{}" . format ( self . host ) if self . port == 21 else "ftp://{}:{}" . format ( self . host , self . port ) ) return url
Get the FTP url this filesystem will open .
60
9
227,574
def _parse_ftp_time ( cls , time_text ) : # type: (Text) -> Optional[int] try : tm_year = int ( time_text [ 0 : 4 ] ) tm_month = int ( time_text [ 4 : 6 ] ) tm_day = int ( time_text [ 6 : 8 ] ) tm_hour = int ( time_text [ 8 : 10 ] ) tm_min = int ( time_text [ 10 : 12 ] ) tm_sec = int ( time_text [ 12 : 14 ] ) except ValueError : return None epoch_time = calendar . timegm ( ( tm_year , tm_month , tm_day , tm_hour , tm_min , tm_sec ) ) return epoch_time
Parse a time from an ftp directory listing .
177
11
227,575
def write_zip ( src_fs , # type: FS file , # type: Union[Text, BinaryIO] compression = zipfile . ZIP_DEFLATED , # type: int encoding = "utf-8" , # type: Text walker = None , # type: Optional[Walker] ) : # type: (...) -> None _zip = zipfile . ZipFile ( file , mode = "w" , compression = compression , allowZip64 = True ) walker = walker or Walker ( ) with _zip : gen_walk = walker . info ( src_fs , namespaces = [ "details" , "stat" , "access" ] ) for path , info in gen_walk : # Zip names must be relative, directory names must end # with a slash. zip_name = relpath ( path + "/" if info . is_dir else path ) if not six . PY3 : # Python2 expects bytes filenames zip_name = zip_name . encode ( encoding , "replace" ) if info . has_namespace ( "stat" ) : # If the file has a stat namespace, get the # zip time directory from the stat structure st_mtime = info . get ( "stat" , "st_mtime" , None ) _mtime = time . localtime ( st_mtime ) zip_time = _mtime [ 0 : 6 ] # type: ZipTime else : # Otherwise, use the modified time from details # namespace. mt = info . modified or datetime . utcnow ( ) zip_time = ( mt . year , mt . month , mt . day , mt . hour , mt . minute , mt . second ) # NOTE(@althonos): typeshed's `zipfile.py` on declares # ZipInfo.__init__ for Python < 3 ?! zip_info = zipfile . ZipInfo ( zip_name , zip_time ) # type: ignore try : if info . permissions is not None : zip_info . external_attr = info . permissions . mode << 16 except MissingInfoNamespace : pass if info . is_dir : zip_info . external_attr |= 0x10 # This is how to record directories with zipfile _zip . writestr ( zip_info , b"" ) else : # Get a syspath if possible try : sys_path = src_fs . getsyspath ( path ) except NoSysPath : # Write from bytes _zip . writestr ( zip_info , src_fs . readbytes ( path ) ) else : # Write from a file which is (presumably) # more memory efficient _zip . write ( sys_path , zip_name )
Write the contents of a filesystem to a zip file .
577
11
227,576
def write_tar ( src_fs , # type: FS file , # type: Union[Text, BinaryIO] compression = None , # type: Optional[Text] encoding = "utf-8" , # type: Text walker = None , # type: Optional[Walker] ) : # type: (...) -> None type_map = { ResourceType . block_special_file : tarfile . BLKTYPE , ResourceType . character : tarfile . CHRTYPE , ResourceType . directory : tarfile . DIRTYPE , ResourceType . fifo : tarfile . FIFOTYPE , ResourceType . file : tarfile . REGTYPE , ResourceType . socket : tarfile . AREGTYPE , # no type for socket ResourceType . symlink : tarfile . SYMTYPE , ResourceType . unknown : tarfile . AREGTYPE , # no type for unknown } tar_attr = [ ( "uid" , "uid" ) , ( "gid" , "gid" ) , ( "uname" , "user" ) , ( "gname" , "group" ) ] mode = "w:{}" . format ( compression or "" ) if isinstance ( file , ( six . text_type , six . binary_type ) ) : _tar = tarfile . open ( file , mode = mode ) else : _tar = tarfile . open ( fileobj = file , mode = mode ) current_time = time . time ( ) walker = walker or Walker ( ) with _tar : gen_walk = walker . info ( src_fs , namespaces = [ "details" , "stat" , "access" ] ) for path , info in gen_walk : # Tar names must be relative tar_name = relpath ( path ) if not six . PY3 : # Python2 expects bytes filenames tar_name = tar_name . encode ( encoding , "replace" ) tar_info = tarfile . TarInfo ( tar_name ) if info . has_namespace ( "stat" ) : mtime = info . get ( "stat" , "st_mtime" , current_time ) else : mtime = info . modified or current_time if isinstance ( mtime , datetime ) : mtime = datetime_to_epoch ( mtime ) if isinstance ( mtime , float ) : mtime = int ( mtime ) tar_info . mtime = mtime for tarattr , infoattr in tar_attr : if getattr ( info , infoattr , None ) is not None : setattr ( tar_info , tarattr , getattr ( info , infoattr , None ) ) if info . has_namespace ( "access" ) : tar_info . mode = getattr ( info . permissions , "mode" , 0o420 ) if info . is_dir : tar_info . type = tarfile . DIRTYPE _tar . addfile ( tar_info ) else : tar_info . type = type_map . get ( info . type , tarfile . REGTYPE ) tar_info . size = info . size with src_fs . openbin ( path ) as bin_file : _tar . addfile ( tar_info , bin_file )
Write the contents of a filesystem to a tar file .
697
11
227,577
def count_lines ( self ) : # type: () -> LineCounts lines = 0 non_blank = 0 for path , info in self . _make_iter ( ) : if info . is_file : for line in self . fs . open ( path , "rb" ) : lines += 1 if line . rstrip ( ) : non_blank += 1 return LineCounts ( lines = lines , non_blank = non_blank )
Count the lines in the matched files .
94
8
227,578
def remove ( self ) : # type: () -> int removes = 0 for path , info in self . _make_iter ( search = "depth" ) : if info . is_dir : self . fs . removetree ( path ) else : self . fs . remove ( path ) removes += 1 return removes
Removed all matched paths .
65
5
227,579
def move_file ( src_fs , # type: Union[Text, FS] src_path , # type: Text dst_fs , # type: Union[Text, FS] dst_path , # type: Text ) : # type: (...) -> None with manage_fs ( src_fs ) as _src_fs : with manage_fs ( dst_fs , create = True ) as _dst_fs : if _src_fs is _dst_fs : # Same filesystem, may be optimized _src_fs . move ( src_path , dst_path , overwrite = True ) else : # Standard copy and delete with _src_fs . lock ( ) , _dst_fs . lock ( ) : copy_file ( _src_fs , src_path , _dst_fs , dst_path ) _src_fs . remove ( src_path )
Move a file from one filesystem to another .
190
9
227,580
def move_dir ( src_fs , # type: Union[Text, FS] src_path , # type: Text dst_fs , # type: Union[Text, FS] dst_path , # type: Text workers = 0 , # type: int ) : # type: (...) -> None def src ( ) : return manage_fs ( src_fs , writeable = False ) def dst ( ) : return manage_fs ( dst_fs , create = True ) with src ( ) as _src_fs , dst ( ) as _dst_fs : with _src_fs . lock ( ) , _dst_fs . lock ( ) : _dst_fs . makedir ( dst_path , recreate = True ) copy_dir ( src_fs , src_path , dst_fs , dst_path , workers = workers ) _src_fs . removetree ( src_path )
Move a directory from one filesystem to another .
195
9
227,581
def recursepath ( path , reverse = False ) : # type: (Text, bool) -> List[Text] if path in "/" : return [ "/" ] path = abspath ( normpath ( path ) ) + "/" paths = [ "/" ] find = path . find append = paths . append pos = 1 len_path = len ( path ) while pos < len_path : pos = find ( "/" , pos ) append ( path [ : pos ] ) pos += 1 if reverse : return paths [ : : - 1 ] return paths
Get intermediate paths from the root to the given path .
117
11
227,582
def join ( * paths ) : # type: (*Text) -> Text absolute = False relpaths = [ ] # type: List[Text] for p in paths : if p : if p [ 0 ] == "/" : del relpaths [ : ] absolute = True relpaths . append ( p ) path = normpath ( "/" . join ( relpaths ) ) if absolute : path = abspath ( path ) return path
Join any number of paths together .
93
7
227,583
def combine ( path1 , path2 ) : # type: (Text, Text) -> Text if not path1 : return path2 . lstrip ( ) return "{}/{}" . format ( path1 . rstrip ( "/" ) , path2 . lstrip ( "/" ) )
Join two paths together .
62
5
227,584
def parts ( path ) : # type: (Text) -> List[Text] _path = normpath ( path ) components = _path . strip ( "/" ) _parts = [ "/" if _path . startswith ( "/" ) else "./" ] if components : _parts += components . split ( "/" ) return _parts
Split a path in to its component parts .
73
9
227,585
def splitext ( path ) : # type: (Text) -> Tuple[Text, Text] parent_path , pathname = split ( path ) if pathname . startswith ( "." ) and pathname . count ( "." ) == 1 : return path , "" if "." not in pathname : return path , "" pathname , ext = pathname . rsplit ( "." , 1 ) path = join ( parent_path , pathname ) return path , "." + ext
Split the extension from the path .
106
7
227,586
def isbase ( path1 , path2 ) : # type: (Text, Text) -> bool _path1 = forcedir ( abspath ( path1 ) ) _path2 = forcedir ( abspath ( path2 ) ) return _path2 . startswith ( _path1 )
Check if path1 is a base of path2 .
62
11
227,587
def isparent ( path1 , path2 ) : # type: (Text, Text) -> bool bits1 = path1 . split ( "/" ) bits2 = path2 . split ( "/" ) while bits1 and bits1 [ - 1 ] == "" : bits1 . pop ( ) if len ( bits1 ) > len ( bits2 ) : return False for ( bit1 , bit2 ) in zip ( bits1 , bits2 ) : if bit1 != bit2 : return False return True
Check if path1 is a parent directory of path2 .
106
12
227,588
def frombase ( path1 , path2 ) : # type: (Text, Text) -> Text if not isparent ( path1 , path2 ) : raise ValueError ( "path1 must be a prefix of path2" ) return path2 [ len ( path1 ) : ]
Get the final path of path2 that isn t in path1 .
60
14
227,589
def relativefrom ( base , path ) : # type: (Text, Text) -> Text base_parts = list ( iteratepath ( base ) ) path_parts = list ( iteratepath ( path ) ) common = 0 for component_a , component_b in zip ( base_parts , path_parts ) : if component_a != component_b : break common += 1 return "/" . join ( [ ".." ] * ( len ( base_parts ) - common ) + path_parts [ common : ] )
Return a path relative from a given base path .
112
10
227,590
def unwrap_errors ( path_replace ) : # type: (Union[Text, Mapping[Text, Text]]) -> Iterator[None] try : yield except errors . ResourceError as e : if hasattr ( e , "path" ) : if isinstance ( path_replace , Mapping ) : e . path = path_replace . get ( e . path , e . path ) else : e . path = path_replace reraise ( type ( e ) , e )
Get a context to map OS errors to their fs . errors counterpart .
104
14
227,591
def match ( pattern , name ) : # type: (Text, Text) -> bool try : re_pat = _PATTERN_CACHE [ ( pattern , True ) ] except KeyError : res = "(?ms)" + _translate ( pattern ) + r'\Z' _PATTERN_CACHE [ ( pattern , True ) ] = re_pat = re . compile ( res ) return re_pat . match ( name ) is not None
Test whether a name matches a wildcard pattern .
100
10
227,592
def match_any ( patterns , name ) : # type: (Iterable[Text], Text) -> bool if not patterns : return True return any ( match ( pattern , name ) for pattern in patterns )
Test if a name matches any of a list of patterns .
43
12
227,593
def get_matcher ( patterns , case_sensitive ) : # type: (Iterable[Text], bool) -> Callable[[Text], bool] if not patterns : return lambda name : True if case_sensitive : return partial ( match_any , patterns ) else : return partial ( imatch_any , patterns )
Get a callable that matches names against the given patterns .
67
12
227,594
def _translate ( pattern , case_sensitive = True ) : # type: (Text, bool) -> Text if not case_sensitive : pattern = pattern . lower ( ) i , n = 0 , len ( pattern ) res = "" while i < n : c = pattern [ i ] i = i + 1 if c == "*" : res = res + "[^/]*" elif c == "?" : res = res + "." elif c == "[" : j = i if j < n and pattern [ j ] == "!" : j = j + 1 if j < n and pattern [ j ] == "]" : j = j + 1 while j < n and pattern [ j ] != "]" : j = j + 1 if j >= n : res = res + "\\[" else : stuff = pattern [ i : j ] . replace ( "\\" , "\\\\" ) i = j + 1 if stuff [ 0 ] == "!" : stuff = "^" + stuff [ 1 : ] elif stuff [ 0 ] == "^" : stuff = "\\" + stuff res = "%s[%s]" % ( res , stuff ) else : res = res + re . escape ( c ) return res
Translate a wildcard pattern to a regular expression .
264
11
227,595
def _delegate ( self , path ) : # type: (Text) -> Tuple[FS, Text] _path = forcedir ( abspath ( normpath ( path ) ) ) is_mounted = _path . startswith for mount_path , fs in self . mounts : if is_mounted ( mount_path ) : return fs , _path [ len ( mount_path ) : ] . rstrip ( "/" ) return self . default_fs , path
Get the delegate FS for a given path .
100
9
227,596
def mount ( self , path , fs ) : # type: (Text, Union[FS, Text]) -> None if isinstance ( fs , text_type ) : from . opener import open_fs fs = open_fs ( fs ) if not isinstance ( fs , FS ) : raise TypeError ( "fs argument must be an FS object or a FS URL" ) if fs is self : raise ValueError ( "Unable to mount self" ) _path = forcedir ( abspath ( normpath ( path ) ) ) for mount_path , _ in self . mounts : if _path . startswith ( mount_path ) : raise MountError ( "mount point overlaps existing mount" ) self . mounts . append ( ( _path , fs ) ) self . default_fs . makedirs ( _path , recreate = True )
Mounts a host FS object on a given path .
178
11
227,597
def start ( self ) : if self . num_workers : self . queue = Queue ( maxsize = self . num_workers ) self . workers = [ _Worker ( self ) for _ in range ( self . num_workers ) ] for worker in self . workers : worker . start ( ) self . running = True
Start the workers .
69
4
227,598
def copy ( self , src_fs , src_path , dst_fs , dst_path ) : # type: (FS, Text, FS, Text) -> None if self . queue is None : # This should be the most performant for a single-thread copy_file_internal ( src_fs , src_path , dst_fs , dst_path ) else : src_file = src_fs . openbin ( src_path , "r" ) try : dst_file = dst_fs . openbin ( dst_path , "w" ) except Exception : src_file . close ( ) raise task = _CopyTask ( src_file , dst_file ) self . queue . put ( task )
Copy a file from one fs to another .
153
9
227,599
def add_fs ( self , name , fs , write = False , priority = 0 ) : # type: (Text, FS, bool, int) -> None if isinstance ( fs , text_type ) : fs = open_fs ( fs ) if not isinstance ( fs , FS ) : raise TypeError ( "fs argument should be an FS object or FS URL" ) self . _filesystems [ name ] = _PrioritizedFS ( priority = ( priority , self . _sort_index ) , fs = fs ) self . _sort_index += 1 self . _resort ( ) if write : self . write_fs = fs self . _write_fs_name = name
Add a filesystem to the MultiFS .
147
8