INSTRUCTION
stringlengths
1
46.3k
RESPONSE
stringlengths
75
80.2k
Set the R/G/B and optionally intensity in one call
def set(self, r, g, b, intensity=None):
    """Set the R/G/B and optionally intensity in one call.

    :param r: red component (masked to 0-255 when written to the ring)
    :param g: green component (masked to 0-255 when written to the ring)
    :param b: blue component (masked to 0-255 when written to the ring)
    :param intensity: optional intensity in percent; left unchanged
                      when None
    """
    self.r = r
    self.g = g
    self.b = b
    # Compare against None explicitly: the old truthiness test silently
    # ignored a legitimate intensity of 0 (LED fully dimmed).
    if intensity is not None:
        self.intensity = intensity
Callback for when new memory data has been fetched
def new_data(self, mem, addr, data):
    """Callback for when new memory data has been fetched."""
    if mem.id != self.id:
        return
    # Reads from the LED driver memory are not acted upon.
    logger.debug(
        "Got new data from the LED driver, but we don't care.")
Write the saved LED-ring data to the Crazyflie
def write_data(self, write_finished_cb):
    """Write the saved LED-ring data to the Crazyflie.

    :param write_finished_cb: called when the underlying memory write
                              completes
    """
    self._write_finished_cb = write_finished_cb
    data = bytearray()
    for led in self.leds:
        # In order to fit all the LEDs in one radio packet RGB565 is used
        # to compress the colors. The calculations below converts 3 bytes
        # RGB into 2 bytes RGB565. Then shifts the value of each color to
        # LSB, applies the intensity and shifts them back for correct
        # alignment on 2 bytes.
        # The *249+1014>>11 / *253+505>>10 forms are rounding 8->5 and
        # 8->6 bit conversions; intensity is a percentage scale factor.
        R5 = ((int)((((int(led.r) & 0xFF) * 249 + 1014) >> 11) & 0x1F) *
              led.intensity / 100)
        G6 = ((int)((((int(led.g) & 0xFF) * 253 + 505) >> 10) & 0x3F) *
              led.intensity / 100)
        B5 = ((int)((((int(led.b) & 0xFF) * 249 + 1014) >> 11) & 0x1F) *
              led.intensity / 100)
        # Pack as RGB565, transmitted big-endian (high byte first).
        tmp = (int(R5) << 11) | (int(G6) << 5) | (int(B5) << 0)
        data += bytearray((tmp >> 8, tmp & 0xFF))
    # flush_queue drops any queued (not yet started) writes for this mem.
    self.mem_handler.write(self, 0x00, data, flush_queue=True)
Callback for when new memory data has been fetched
def new_data(self, mem, addr, data):
    """Callback for when new memory data has been fetched.

    Handles the two-stage read of the I2C EEPROM: the first 16 bytes at
    addr 0 (token + version + settings), then, for layout version 1,
    5 more bytes at addr 16 that complete the radio address.
    """
    if mem.id == self.id:
        if addr == 0:
            done = False
            # Check for header
            if data[0:4] == EEPROM_TOKEN:
                logger.debug('Got new data: {}'.format(data))
                [self.elements['version'],
                 self.elements['radio_channel'],
                 self.elements['radio_speed'],
                 self.elements['pitch_trim'],
                 self.elements['roll_trim']] = struct.unpack(
                    '<BBBff', data[4:15])
                if self.elements['version'] == 0:
                    done = True
                elif self.elements['version'] == 1:
                    # Version 1 adds a 5-byte radio address; keep this
                    # chunk and fetch the rest.
                    self.datav0 = data
                    self.mem_handler.read(self, 16, 5)
            else:
                # No valid token: report an invalid memory to the caller.
                self.valid = False
                if self._update_finished_cb:
                    self._update_finished_cb(self)
                    self._update_finished_cb = None

        if addr == 16:
            # Byte 15 of the first chunk is the upper radio-address byte;
            # the 4 bytes just read are the lower 32 bits.
            [radio_address_upper, radio_address_lower] = struct.unpack(
                '<BI', self.datav0[15:16] + data[0:4])
            self.elements['radio_address'] = int(
                radio_address_upper) << 32 | radio_address_lower
            logger.debug(self.elements)
            data = self.datav0 + data
            done = True

        if done:
            # Last byte is a simple mod-256 checksum over the rest.
            if self._checksum256(data[:len(data) - 1]) == \
                    data[len(data) - 1]:
                self.valid = True
            if self._update_finished_cb:
                self._update_finished_cb(self)
                self._update_finished_cb = None
Callback for when new memory data has been fetched
def new_data(self, mem, addr, data):
    """Callback for when new memory data has been fetched.

    First read (addr 0) covers the 8-byte header; if the element area
    did not fit in that read, a second read at addr 0x08 fetches it.
    """
    if mem.id == self.id:
        if addr == 0:
            if self._parse_and_check_header(data[0:8]):
                if self._parse_and_check_elements(data[9:11]):
                    self.valid = True
                    self._update_finished_cb(self)
                    self._update_finished_cb = None
                else:
                    # We need to fetch the elements, find out the length
                    (elem_ver, elem_len) = struct.unpack('BB', data[8:10])
                    # +3 covers the 2 element-header bytes and the CRC.
                    self.mem_handler.read(self, 8, elem_len + 3)
            else:
                # Call the update if the CRC check of the header fails,
                # we're done here
                if self._update_finished_cb:
                    self._update_finished_cb(self)
                    self._update_finished_cb = None
        elif addr == 0x08:
            if self._parse_and_check_elements(data):
                self.valid = True
            if self._update_finished_cb:
                self._update_finished_cb(self)
                self._update_finished_cb = None
Parse and check the CRC and length of the elements part of the memory
def _parse_and_check_elements(self, data): """ Parse and check the CRC and length of the elements part of the memory """ crc = data[-1] test_crc = crc32(data[:-1]) & 0x0ff elem_data = data[2:-1] if test_crc == crc: while len(elem_data) > 0: (eid, elen) = struct.unpack('BB', elem_data[:2]) self.elements[self.element_mapping[eid]] = \ elem_data[2:2 + elen].decode('ISO-8859-1') elem_data = elem_data[2 + elen:] return True return False
Request an update of the memory content
def update(self, update_finished_cb):
    """Request an update of the memory content"""
    # Ignore the request if an update is already in flight.
    if self._update_finished_cb:
        return
    self._update_finished_cb = update_finished_cb
    self.valid = False
    logger.debug('Updating content of memory {}'.format(self.id))
    # Start reading the header
    self.mem_handler.read(self, 0, 11)
Parse and check the CRC of the header part of the memory
def _parse_and_check_header(self, data): """Parse and check the CRC of the header part of the memory""" (start, self.pins, self.vid, self.pid, crc) = struct.unpack('<BIBBB', data) test_crc = crc32(data[:-1]) & 0x0ff if start == 0xEB and crc == test_crc: return True return False
Callback for when new memory data has been fetched
def new_data(self, mem, addr, data):
    """Callback for when new memory data has been fetched.

    First read returns the anchor count; subsequent reads return one
    anchor page each, requested sequentially until all are in.
    """
    done = False
    if mem.id == self.id:
        if addr == LocoMemory.MEM_LOCO_INFO:
            self.nr_of_anchors = data[0]
            if self.nr_of_anchors == 0:
                done = True
            else:
                # Pre-allocate one slot per anchor and fetch page 0.
                self.anchor_data = \
                    [AnchorData() for _ in range(self.nr_of_anchors)]
                self._request_page(0)
        else:
            # Map the address back to the anchor page it belongs to.
            page = int((addr - LocoMemory.MEM_LOCO_ANCHOR_BASE) /
                       LocoMemory.MEM_LOCO_ANCHOR_PAGE_SIZE)
            self.anchor_data[page].set_from_mem_data(data)
            next_page = page + 1
            if next_page < self.nr_of_anchors:
                self._request_page(next_page)
            else:
                done = True

    if done:
        self.valid = True
        if self._update_finished_cb:
            self._update_finished_cb(self)
            self._update_finished_cb = None
Request an update of the memory content
def update(self, update_finished_cb):
    """Request an update of the memory content"""
    # Only one update may be in flight at a time.
    if self._update_finished_cb:
        return
    self._update_finished_cb = update_finished_cb
    self.anchor_data = []
    self.nr_of_anchors = 0
    self.valid = False
    logger.debug('Updating content of memory {}'.format(self.id))
    # Start reading the header
    self.mem_handler.read(self, LocoMemory.MEM_LOCO_INFO,
                          LocoMemory.MEM_LOCO_INFO_LEN)
Callback for when new memory data has been fetched
def new_data(self, mem, addr, data):
    """Callback for when new memory data has been fetched"""
    if mem.id != self.id:
        return
    if addr == LocoMemory2.ADR_ID_LIST:
        self._handle_id_list_data(data)
    elif addr == LocoMemory2.ADR_ACTIVE_ID_LIST:
        self._handle_active_id_list_data(data)
    else:
        # Any other address lies inside an anchor page; map it back to
        # the anchor id it belongs to.
        anchor_id = int((addr - LocoMemory2.ADR_ANCHOR_BASE) /
                        LocoMemory2.ANCHOR_PAGE_SIZE)
        self._handle_anchor_data(anchor_id, data)
Request an update of the id list
def update_id_list(self, update_ids_finished_cb):
    """Request an update of the id list"""
    # Ignore the request if an id-list update is already in flight.
    if self._update_ids_finished_cb:
        return
    self._update_ids_finished_cb = update_ids_finished_cb
    # Invalidate everything derived from the id list.
    self.anchor_ids = []
    self.active_anchor_ids = []
    self.anchor_data = {}
    self.nr_of_anchors = 0
    self.ids_valid = False
    self.data_valid = False
    logger.debug('Updating ids of memory {}'.format(self.id))
    # Start reading the header
    self.mem_handler.read(self, LocoMemory2.ADR_ID_LIST,
                          LocoMemory2.ID_LIST_LEN)
Request an update of the active id list
def update_active_id_list(self, update_active_ids_finished_cb):
    """Request an update of the active id list"""
    # Ignore the request if an active-id update is already in flight.
    if self._update_active_ids_finished_cb:
        return
    self._update_active_ids_finished_cb = update_active_ids_finished_cb
    self.active_anchor_ids = []
    self.active_ids_valid = False
    logger.debug('Updating active ids of memory {}'.format(self.id))
    # Start reading the header
    self.mem_handler.read(self, LocoMemory2.ADR_ACTIVE_ID_LIST,
                          LocoMemory2.ID_LIST_LEN)
Request an update of the anchor data
def update_data(self, update_data_finished_cb):
    """Request an update of the anchor data"""
    # Requires a known (non-empty) anchor list and no update in flight.
    if self._update_data_finished_cb or self.nr_of_anchors <= 0:
        return
    self._update_data_finished_cb = update_data_finished_cb
    self.anchor_data = {}
    self.data_valid = False
    self._nr_of_anchors_to_fetch = self.nr_of_anchors
    logger.debug('Updating anchor data of memory {}'.format(self.id))
    # Start reading the first anchor
    self._currently_fetching_index = 0
    self._request_page(self.anchor_ids[self._currently_fetching_index])
Write trajectory data to the Crazyflie
def write_data(self, write_finished_cb):
    """Write trajectory data to the Crazyflie"""
    self._write_finished_cb = write_finished_cb
    # Serialize every Poly4D entry: four polynomials of eight float
    # coefficients each (x, y, z, yaw) followed by the segment duration.
    data = bytearray()
    for segment in self.poly4Ds:
        for axis in (segment.x, segment.y, segment.z, segment.yaw):
            data += struct.pack('<ffffffff', *axis.values)
        data += struct.pack('<f', segment.duration)
    self.mem_handler.write(self, 0x00, data, flush_queue=True)
Called to request a new chunk of data to be read from the Crazyflie
def _request_new_chunk(self): """ Called to request a new chunk of data to be read from the Crazyflie """ # Figure out the length of the next request new_len = self._bytes_left if new_len > _ReadRequest.MAX_DATA_LENGTH: new_len = _ReadRequest.MAX_DATA_LENGTH logger.debug('Requesting new chunk of {}bytes at 0x{:X}'.format( new_len, self._current_addr)) # Request the data for the next address pk = CRTPPacket() pk.set_header(CRTPPort.MEM, CHAN_READ) pk.data = struct.pack('<BIB', self.mem.id, self._current_addr, new_len) reply = struct.unpack('<BBBBB', pk.data[:-1]) self.cf.send_packet(pk, expected_reply=reply, timeout=1)
Callback when data is received from the Crazyflie
def add_data(self, addr, data):
    """Callback when data is received from the Crazyflie"""
    if addr != self._current_addr:
        logger.warning(
            'Address did not match when adding data to read request!')
        return

    # Append the chunk and advance the read pointer.
    chunk_len = len(data)
    self.data += data
    self._bytes_left -= chunk_len
    self._current_addr += chunk_len

    # Keep fetching until the whole request has been satisfied.
    if self._bytes_left > 0:
        self._request_new_chunk()
        return False
    return True
Called to write a new chunk of data to the Crazyflie
def _write_new_chunk(self): """ Called to request a new chunk of data to be read from the Crazyflie """ # Figure out the length of the next request new_len = len(self._data) if new_len > _WriteRequest.MAX_DATA_LENGTH: new_len = _WriteRequest.MAX_DATA_LENGTH logger.debug('Writing new chunk of {}bytes at 0x{:X}'.format( new_len, self._current_addr)) data = self._data[:new_len] self._data = self._data[new_len:] pk = CRTPPacket() pk.set_header(CRTPPort.MEM, CHAN_WRITE) pk.data = struct.pack('<BI', self.mem.id, self._current_addr) # Create a tuple used for matching the reply using id and address reply = struct.unpack('<BBBBB', pk.data) self._sent_reply = reply # Add the data pk.data += struct.pack('B' * len(data), *data) self._sent_packet = pk self.cf.send_packet(pk, expected_reply=reply, timeout=1) self._addr_add = len(data)
Callback when a write has been acknowledged by the Crazyflie
def write_done(self, addr):
    """Callback when the Crazyflie acknowledges a write.

    :param addr: address the acknowledgement refers to
    :return: True when the whole request has been written, False when
             more chunks remain (the next chunk is sent), None when the
             ack was for an unexpected address
    """
    if addr != self._current_addr:
        # Fixed log text: this is the write path, the old message said
        # "read request".
        logger.warning(
            'Address did not match when adding data to write request!')
        return

    if len(self._data) > 0:
        # Advance past the chunk that was just acked and send the next.
        self._current_addr += self._addr_add
        self._write_new_chunk()
        return False

    logger.debug('This write request is done')
    return True
Callback from each individual memory (only 1-wire) when reading of header/elements are done
def _mem_update_done(self, mem): """ Callback from each individual memory (only 1-wire) when reading of header/elements are done """ if mem.id in self._ow_mems_left_to_update: self._ow_mems_left_to_update.remove(mem.id) logger.debug(mem) if len(self._ow_mems_left_to_update) == 0: if self._refresh_callback: self._refresh_callback() self._refresh_callback = None
Fetch the memory with the supplied id
def get_mem(self, id):
    """Fetch the memory with the supplied id"""
    # Linear scan is fine: the number of memories is small.
    return next((m for m in self.mems if m.id == id), None)
Fetch all the memories of the supplied type
def get_mems(self, type):
    """Fetch all the memories of the supplied type"""
    # Preserve registration order; return a tuple like the original API.
    return tuple(m for m in self.mems if m.type == type)
Search for specific memory id/name and return it
def ow_search(self, vid=0xBC, pid=None, name=None):
    """Search for specific memory id/name and return it"""
    # NOTE(review): the vid parameter is currently unused in the match
    # below - confirm whether filtering on it was intended.
    for m in self.get_mems(MemoryElement.TYPE_1W):
        # Match on product id or on name, whichever was supplied.
        if (pid and m.pid == pid) or (name and m.name == name):
            return m
    return None
Write the specified data to the given memory at the given address
def write(self, memory, addr, data, flush_queue=False):
    """Write the specified data to the given memory at the given address.

    :param memory: memory element to write to
    :param addr: start address inside the memory
    :param data: bytes to write
    :param flush_queue: when True, drop all queued (not yet started)
                        write requests for this memory first
    :return: True (the request is always queued)
    """
    wreq = _WriteRequest(memory, addr, data, self.cf)
    if memory.id not in self._write_requests:
        self._write_requests[memory.id] = []

    # Workaround until we secure the uplink and change messages for
    # mems to non-blocking
    self._write_requests_lock.acquire()
    try:
        if flush_queue:
            # Keep only the currently running request (index 0).
            self._write_requests[memory.id] = \
                self._write_requests[memory.id][:1]
        # Bug fix: the old code did insert(len(self._write_requests), ...)
        # which indexed by the number of memories in the dict rather than
        # the queue length; it only worked because Python clamps an
        # out-of-range insert index. append() is what was intended.
        self._write_requests[memory.id].append(wreq)
        if len(self._write_requests[memory.id]) == 1:
            wreq.start()
    finally:
        # Release the lock even if start() raises.
        self._write_requests_lock.release()

    return True
Read the specified amount of bytes from the given memory at the given address
def read(self, memory, addr, length):
    """
    Read the specified amount of bytes from the given memory at the given
    address
    """
    # Only one outstanding read per memory id is allowed.
    if memory.id in self._read_requests:
        logger.warning('There is already a read operation ongoing for '
                       'memory id {}'.format(memory.id))
        return False

    request = _ReadRequest(memory, addr, length, self.cf)
    self._read_requests[memory.id] = request
    request.start()
    return True
Start fetching all the detected memories
def refresh(self, refresh_done_callback):
    """Start fetching all the detected memories.

    :param refresh_done_callback: called once every memory has been
                                  (re-)enumerated and read
    """
    self._refresh_callback = refresh_done_callback
    self._fetch_id = 0
    # Detach and disconnect any memories from a previous refresh; a
    # failure here is logged but does not abort the refresh.
    for m in self.mems:
        try:
            self.mem_read_cb.remove_callback(m.new_data)
            m.disconnect()
        except Exception as e:
            logger.info(
                'Error when removing memory after update: {}'.format(e))
    self.mems = []

    self.nbr_of_mems = 0
    self._getting_count = False

    # Ask the Crazyflie how many memories it has; the rest of the
    # enumeration continues in _new_packet_cb.
    logger.debug('Requesting number of memories')
    pk = CRTPPacket()
    pk.set_header(CRTPPort.MEM, CHAN_INFO)
    pk.data = (CMD_INFO_NBR,)
    self.cf.send_packet(pk, expected_reply=(CMD_INFO_NBR,))
Callback for newly arrived packets for the memory port
def _new_packet_cb(self, packet):
    """Callback for newly arrived packets for the memory port.

    Dispatches on the CRTP channel: CHAN_INFO drives memory enumeration,
    CHAN_WRITE/CHAN_READ feed the outstanding write/read requests.
    """
    chan = packet.channel
    cmd = packet.data[0]
    payload = packet.data[1:]

    if chan == CHAN_INFO:
        if cmd == CMD_INFO_NBR:
            self.nbr_of_mems = payload[0]
            logger.info('{} memories found'.format(self.nbr_of_mems))

            # Start requesting information about the memories,
            # if there are any...
            if self.nbr_of_mems > 0:
                if not self._getting_count:
                    self._getting_count = True
                    logger.debug('Requesting first id')
                    pk = CRTPPacket()
                    pk.set_header(CRTPPort.MEM, CHAN_INFO)
                    pk.data = (CMD_INFO_DETAILS, 0)
                    self.cf.send_packet(pk, expected_reply=(
                        CMD_INFO_DETAILS, 0))
            else:
                self._refresh_callback()

        if cmd == CMD_INFO_DETAILS:
            # Did we get a good reply, otherwise try again:
            if len(payload) < 5:
                # Workaround for 1-wire bug when memory is detected
                # but updating the info crashes the communication with
                # the 1-wire. Fail by saying we only found 1 memory
                # (the I2C).
                logger.error(
                    '-------->Got good count, but no info on mem!')
                self.nbr_of_mems = 1
                if self._refresh_callback:
                    self._refresh_callback()
                    self._refresh_callback = None
                return

            # Create information about a new memory
            # Id - 1 byte
            mem_id = payload[0]
            # Type - 1 byte
            mem_type = payload[1]
            # Size 4 bytes (as addr)
            # NOTE(review): 'I' uses native endianness/alignment, unlike
            # the explicit '<' formats elsewhere - confirm intended.
            mem_size = struct.unpack('I', payload[2:6])[0]
            # Addr (only valid for 1-wire?)
            mem_addr_raw = struct.unpack('B' * 8, payload[6:14])
            mem_addr = ''
            for m in mem_addr_raw:
                mem_addr += '{:02X}'.format(m)

            if (not self.get_mem(mem_id)):
                # Instantiate the right wrapper for the memory type and
                # hook it into the read/write callback chains it needs.
                if mem_type == MemoryElement.TYPE_1W:
                    mem = OWElement(id=mem_id, type=mem_type,
                                    size=mem_size,
                                    addr=mem_addr, mem_handler=self)
                    self.mem_read_cb.add_callback(mem.new_data)
                    self.mem_write_cb.add_callback(mem.write_done)
                    self._ow_mems_left_to_update.append(mem.id)
                elif mem_type == MemoryElement.TYPE_I2C:
                    mem = I2CElement(id=mem_id, type=mem_type,
                                     size=mem_size, mem_handler=self)
                    self.mem_read_cb.add_callback(mem.new_data)
                    self.mem_write_cb.add_callback(mem.write_done)
                elif mem_type == MemoryElement.TYPE_DRIVER_LED:
                    mem = LEDDriverMemory(id=mem_id, type=mem_type,
                                          size=mem_size, mem_handler=self)
                    logger.debug(mem)
                    self.mem_read_cb.add_callback(mem.new_data)
                    self.mem_write_cb.add_callback(mem.write_done)
                elif mem_type == MemoryElement.TYPE_LOCO:
                    mem = LocoMemory(id=mem_id, type=mem_type,
                                     size=mem_size, mem_handler=self)
                    logger.debug(mem)
                    self.mem_read_cb.add_callback(mem.new_data)
                elif mem_type == MemoryElement.TYPE_TRAJ:
                    mem = TrajectoryMemory(id=mem_id, type=mem_type,
                                           size=mem_size, mem_handler=self)
                    logger.debug(mem)
                    self.mem_write_cb.add_callback(mem.write_done)
                elif mem_type == MemoryElement.TYPE_LOCO2:
                    mem = LocoMemory2(id=mem_id, type=mem_type,
                                      size=mem_size, mem_handler=self)
                    logger.debug(mem)
                    self.mem_read_cb.add_callback(mem.new_data)
                else:
                    mem = MemoryElement(id=mem_id, type=mem_type,
                                        size=mem_size, mem_handler=self)
                    logger.debug(mem)
                self.mems.append(mem)
                self.mem_added_cb.call(mem)

                self._fetch_id = mem_id + 1

            if self.nbr_of_mems - 1 >= self._fetch_id:
                # More memories to enumerate: ask for the next one.
                logger.debug(
                    'Requesting information about memory {}'.format(
                        self._fetch_id))
                pk = CRTPPacket()
                pk.set_header(CRTPPort.MEM, CHAN_INFO)
                pk.data = (CMD_INFO_DETAILS, self._fetch_id)
                self.cf.send_packet(pk, expected_reply=(
                    CMD_INFO_DETAILS, self._fetch_id))
            else:
                logger.debug(
                    'Done getting all the memories, start reading the OWs')
                ows = self.get_mems(MemoryElement.TYPE_1W)
                # If there are any OW mems start reading them, otherwise
                # we are done
                for ow_mem in ows:
                    ow_mem.update(self._mem_update_done)
                if len(ows) == 0:
                    if self._refresh_callback:
                        self._refresh_callback()
                        self._refresh_callback = None

    if chan == CHAN_WRITE:
        # NOTE: 'id' shadows the builtin; kept as-is (doc-only change).
        id = cmd
        (addr, status) = struct.unpack('<IB', payload[0:5])
        logger.debug(
            'WRITE: Mem={}, addr=0x{:X}, status=0x{}'.format(
                id, addr, status))
        # Find the read request
        if id in self._write_requests:
            self._write_requests_lock.acquire()
            wreq = self._write_requests[id][0]
            if status == 0:
                if wreq.write_done(addr):
                    # self._write_requests.pop(id, None)
                    # Remove the first item
                    self._write_requests[id].pop(0)
                    self.mem_write_cb.call(wreq.mem, wreq.addr)

                    # Get a new one to start (if there are any)
                    if len(self._write_requests[id]) > 0:
                        self._write_requests[id][0].start()
            else:
                logger.debug(
                    'Status {}: write resending...'.format(status))
                wreq.resend()
            self._write_requests_lock.release()

    if chan == CHAN_READ:
        id = cmd
        (addr, status) = struct.unpack('<IB', payload[0:5])
        data = struct.unpack('B' * len(payload[5:]), payload[5:])
        logger.debug('READ: Mem={}, addr=0x{:X}, status=0x{}, '
                     'data={}'.format(id, addr, status, data))
        # Find the read request
        if id in self._read_requests:
            logger.debug(
                'READING: We are still interested in request for '
                'mem {}'.format(id))
            rreq = self._read_requests[id]
            if status == 0:
                if rreq.add_data(addr, payload[5:]):
                    self._read_requests.pop(id, None)
                    self.mem_read_cb.call(rreq.mem, rreq.addr, rreq.data)
            else:
                logger.debug('Status {}: resending...'.format(status))
                rreq.resend()
Reset to the bootloader The parameter cpuid shall correspond to the device to reset. Return true if the reset has been done and the contact with the bootloader is established.
def reset_to_bootloader1(self, cpu_id): """ Reset to the bootloader The parameter cpuid shall correspond to the device to reset. Return true if the reset has been done and the contact with the bootloader is established. """ # Send an echo request and wait for the answer # Mainly aim to bypass a bug of the crazyflie firmware that prevents # reset before normal CRTP communication pk = CRTPPacket() pk.port = CRTPPort.LINKCTRL pk.data = (1, 2, 3) + cpu_id self.link.send_packet(pk) pk = None while True: pk = self.link.receive_packet(2) if not pk: return False if pk.port == CRTPPort.LINKCTRL: break # Send the reset to bootloader request pk = CRTPPacket() pk.set_header(0xFF, 0xFF) pk.data = (0xFF, 0xFE) + cpu_id self.link.send_packet(pk) # Wait to ack the reset ... pk = None while True: pk = self.link.receive_packet(2) if not pk: return False if pk.port == 0xFF and tuple(pk.data) == (0xFF, 0xFE) + cpu_id: pk.data = (0xFF, 0xF0) + cpu_id self.link.send_packet(pk) break time.sleep(0.1) self.link.close() self.link = cflib.crtp.get_link_driver(self.clink_address) # time.sleep(0.1) return self._update_info()
Reset to firmware The parameter target_id shall correspond to the device to reset. Return true if the reset has been done
def reset_to_firmware(self, target_id): """ Reset to firmware The parameter cpuid shall correspond to the device to reset. Return true if the reset has been done """ # The fake CPU ID is legacy from the Crazyflie 1.0 # In order to reset the CPU id had to be sent, but this # was removed before launching it. But the length check is # still in the bootloader. So to work around this bug so # some extra data needs to be sent. fake_cpu_id = (1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12) # Send the reset to bootloader request pk = CRTPPacket() pk.set_header(0xFF, 0xFF) pk.data = (target_id, 0xFF) + fake_cpu_id self.link.send_packet(pk) # Wait to ack the reset ... pk = None while True: pk = self.link.receive_packet(2) if not pk: return False if (pk.header == 0xFF and struct.unpack( 'B' * len(pk.data), pk.data)[:2] == (target_id, 0xFF)): # Difference in CF1 and CF2 (CPU ID) if target_id == 0xFE: pk.data = (target_id, 0xF0, 0x01) else: pk.data = (target_id, 0xF0) + fake_cpu_id self.link.send_packet(pk) break time.sleep(0.1)
Try to get a connection with the bootloader by requesting info 5 times. This let roughly 10 seconds to boot the copter ...
def check_link_and_get_info(self, target_id=0xFF):
    """Try to get a connection with the bootloader by requesting info
    5 times. This let roughly 10 seconds to boot the copter ..."""
    for _ in range(5):
        if not self._update_info(target_id):
            continue
        target = self.targets[target_id]
        if self._in_boot_cb:
            self._in_boot_cb.call(True, target.protocol_version)
        if self._info_cb:
            self._info_cb.call(target)
        return True
    return False
Call the command getInfo and fill up the information received in the fields of the object
def _update_info(self, target_id):
    """ Call the command getInfo and fill up the information received in
    the fields of the object

    :param target_id: bootloader target to query
    :return: True when a valid getInfo answer was received
    """
    # Call getInfo ...
    pk = CRTPPacket()
    pk.set_header(0xFF, 0xFF)
    pk.data = (target_id, 0x10)
    self.link.send_packet(pk)

    # Wait for the answer
    pk = self.link.receive_packet(2)

    if (pk and pk.header == 0xFF and
            struct.unpack('<BB', pk.data[0:2]) == (target_id, 0x10)):
        tab = struct.unpack('BBHHHH', pk.data[0:10])
        cpuid = struct.unpack('B' * 12, pk.data[10:22])
        if target_id not in self.targets:
            self.targets[target_id] = Target(target_id)
        self.targets[target_id].addr = target_id
        if len(pk.data) > 22:
            # Bug fix: this previously read the nonexistent attribute
            # 'pk.datat', raising AttributeError whenever the protocol
            # version byte was present in the answer.
            self.targets[target_id].protocol_version = pk.data[22]
            self.protocol_version = pk.data[22]
        self.targets[target_id].page_size = tab[2]
        self.targets[target_id].buffer_pages = tab[3]
        self.targets[target_id].flash_pages = tab[4]
        self.targets[target_id].start_page = tab[5]
        # CPU id is presented as colon-separated hex bytes.
        self.targets[target_id].cpuid = '%02X' % cpuid[0]
        for i in cpuid[1:]:
            self.targets[target_id].cpuid += ':%02X' % i
        if (self.protocol_version == 0x10 and
                target_id == TargetTypes.STM32):
            self._update_mapping(target_id)
        return True
    return False
Upload data into a buffer on the Crazyflie
def upload_buffer(self, target_id, page, address, buff): """Upload data into a buffer on the Crazyflie""" # print len(buff) count = 0 pk = CRTPPacket() pk.set_header(0xFF, 0xFF) pk.data = struct.pack('=BBHH', target_id, 0x14, page, address) for i in range(0, len(buff)): pk.data.append(buff[i]) count += 1 if count > 24: self.link.send_packet(pk) count = 0 pk = CRTPPacket() pk.set_header(0xFF, 0xFF) pk.data = struct.pack('=BBHH', target_id, 0x14, page, i + address + 1) self.link.send_packet(pk)
Read back a flash page from the Crazyflie and return it
def read_flash(self, addr=0xFF, page=0x00):
    """Read back a flash page from the Crazyflie and return it.

    :param addr: bootloader target address (key into self.targets)
    :param page: flash page number to read
    :return: bytearray of page_size bytes, or None on repeated timeouts
    """
    buff = bytearray()

    page_size = self.targets[addr].page_size
    # The page is fetched in 25-byte chunks.
    for i in range(0, int(math.ceil(page_size / 25.0))):
        pk = None
        retry_counter = 5
        # Retry until a matching 0x1C answer arrives or retries run out.
        while ((not pk or pk.header != 0xFF or
                struct.unpack('<BB', pk.data[0:2]) != (addr, 0x1C)) and
               retry_counter >= 0):
            pk = CRTPPacket()
            pk.set_header(0xFF, 0xFF)
            pk.data = struct.pack('<BBHH', addr, 0x1C, page, (i * 25))
            self.link.send_packet(pk)

            pk = self.link.receive_packet(1)
            retry_counter -= 1

        if (retry_counter < 0):
            return None
        else:
            # Skip the 6-byte reply header, keep the payload.
            buff += pk.data[6:]

    # For some reason we get one byte extra here...
    return buff[0:page_size]
Initiate flashing of data in the buffer to flash.
def write_flash(self, addr, page_buffer, target_page, page_count): """Initiate flashing of data in the buffer to flash.""" # print "Write page", flashPage # print "Writing page [%d] and [%d] forward" % (flashPage, nPage) pk = None # Flushing downlink ... pk = self.link.receive_packet(0) while pk is not None: pk = self.link.receive_packet(0) retry_counter = 5 # print "Flasing to 0x{:X}".format(addr) while ((not pk or pk.header != 0xFF or struct.unpack('<BB', pk.data[0:2]) != (addr, 0x18)) and retry_counter >= 0): pk = CRTPPacket() pk.set_header(0xFF, 0xFF) pk.data = struct.pack('<BBHHH', addr, 0x18, page_buffer, target_page, page_count) self.link.send_packet(pk) pk = self.link.receive_packet(1) retry_counter -= 1 if retry_counter < 0: self.error_code = -1 return False self.error_code = pk.data[3] return pk.data[2] == 1
Decode the CPU id into a string
def decode_cpu_id(self, cpuid):
    """Decode the CPU id into a tuple of integer byte values.

    :param cpuid: colon-separated hex byte string, e.g. '0A:1B:FF'
    :return: tuple of ints, one per hex byte
    """
    # Use int(part, 16) instead of eval('0x' + part): identical result
    # for valid hex, without the arbitrary-code-execution hazard eval()
    # carries on malformed input.
    return tuple(int(part, 16) for part in cpuid.split(':'))
Set the port and channel for this packet.
def set_header(self, port, channel):
    """
    Set the port and channel for this packet.
    """
    # Store both pieces, then rebuild the cached header byte.
    self.channel = channel
    self._port = port
    self._update_header()
Set the packet data
def _set_data(self, data):
    """Set the packet data.

    Normalizes any accepted input type to a bytearray; raises for
    anything else. The exact type() checks (not isinstance) are kept
    deliberately: subclasses of these types are rejected.
    """
    if type(data) == bytearray:
        self._data = data
    elif type(data) == str:
        # On Python 2 a str is already raw bytes; on Python 3 it must
        # be encoded (ISO-8859-1 maps code points 0-255 one-to-one).
        if sys.version_info < (3,):
            self._data = bytearray(data)
        else:
            self._data = bytearray(data.encode('ISO-8859-1'))
    elif type(data) == list or type(data) == tuple:
        self._data = bytearray(data)
    elif sys.version_info >= (3,) and type(data) == bytes:
        self._data = bytearray(data)
    else:
        raise Exception('Data must be bytearray, string, list or tuple,'
                        ' not {}'.format(type(data)))
Takes off, that is starts the motors, goes straight up and hovers. Do not call this function if you use the with keyword. Take off is done automatically when the context is created. :param height: the height (meters) to hover at. None uses the default height set when constructed. :param velocity: the velocity (meters/second) when taking off :return:
def take_off(self, height=None, velocity=VELOCITY):
    """
    Takes off, that is starts the motors, goes straight up and hovers.
    Do not call this function if you use the with keyword. Take off is
    done automatically when the context is created.

    :param height: the height (meters) to hover at. None uses the default
                   height set when constructed.
    :param velocity: the velocity (meters/second) when taking off
    :return:
    """
    if self._is_flying:
        raise Exception('Already flying')

    if not self._cf.is_connected():
        raise Exception('Crazyflie is not connected')

    self._is_flying = True
    self._reset_position_estimator()

    # Spawn the worker thread that continuously streams setpoints.
    self._thread = _SetPointThread(self._cf)
    self._thread.start()

    if height is None:
        height = self.default_height

    self.up(height, velocity)
Go straight down and turn off the motors. Do not call this function if you use the with keyword. Landing is done automatically when the context goes out of scope. :param velocity: The velocity (meters/second) when going down :return:
def land(self, velocity=VELOCITY):
    """
    Go straight down and turn off the motors.

    Do not call this function if you use the with keyword. Landing is
    done automatically when the context goes out of scope.

    :param velocity: The velocity (meters/second) when going down
    :return:
    """
    # Landing while not flying is a no-op.
    if not self._is_flying:
        return
    self.down(self._thread.get_height(), velocity)

    self._thread.stop()
    self._thread = None

    self._cf.commander.send_stop_setpoint()
    self._is_flying = False
Turn to the left, staying on the spot :param angle_degrees: How far to turn (degrees) :param rate: The turning speed (degrees/second) :return:
def turn_left(self, angle_degrees, rate=RATE):
    """
    Turn to the left, staying on the spot

    :param angle_degrees: How far to turn (degrees)
    :param rate: The turning speed (degrees/second)
    :return:
    """
    # Turn at the requested rate exactly long enough to sweep the angle.
    duration = angle_degrees / rate
    self.start_turn_left(rate)
    time.sleep(duration)
    self.stop()
Turn to the right, staying on the spot :param angle_degrees: How far to turn (degrees) :param rate: The turning speed (degrees/second) :return:
def turn_right(self, angle_degrees, rate=RATE):
    """
    Turn to the right, staying on the spot

    :param angle_degrees: How far to turn (degrees)
    :param rate: The turning speed (degrees/second)
    :return:
    """
    # Turn at the requested rate exactly long enough to sweep the angle.
    duration = angle_degrees / rate
    self.start_turn_right(rate)
    time.sleep(duration)
    self.stop()
Go in circle, counter clock wise :param radius_m: The radius of the circle (meters) :param velocity: The velocity along the circle (meters/second) :param angle_degrees: How far to go in the circle (degrees) :return:
def circle_left(self, radius_m, velocity=VELOCITY, angle_degrees=360.0):
    """
    Go in circle, counter clock wise

    :param radius_m: The radius of the circle (meters)
    :param velocity: The velocity along the circle (meters/second)
    :param angle_degrees: How far to go in the circle (degrees)
    :return:
    """
    # Arc length of the swept angle, then the time needed to fly it.
    arc = 2 * radius_m * math.pi * angle_degrees / 360.0
    duration = arc / velocity

    self.start_circle_left(radius_m, velocity)
    time.sleep(duration)
    self.stop()
Go in circle, clock wise :param radius_m: The radius of the circle (meters) :param velocity: The velocity along the circle (meters/second) :param angle_degrees: How far to go in the circle (degrees) :return:
def circle_right(self, radius_m, velocity=VELOCITY, angle_degrees=360.0):
    """
    Go in circle, clock wise

    :param radius_m: The radius of the circle (meters)
    :param velocity: The velocity along the circle (meters/second)
    :param angle_degrees: How far to go in the circle (degrees)
    :return:
    """
    # Arc length of the swept angle, then the time needed to fly it.
    arc = 2 * radius_m * math.pi * angle_degrees / 360.0
    duration = arc / velocity

    self.start_circle_right(radius_m, velocity)
    time.sleep(duration)
    self.stop()
Move in a straight line. positive X is forward positive Y is left positive Z is up :param distance_x_m: The distance to travel along the X-axis (meters) :param distance_y_m: The distance to travel along the Y-axis (meters) :param distance_z_m: The distance to travel along the Z-axis (meters) :param velocity: the velocity of the motion (meters/second) :return:
def move_distance(self, distance_x_m, distance_y_m, distance_z_m,
                  velocity=VELOCITY):
    """
    Move in a straight line.
    positive X is forward
    positive Y is left
    positive Z is up

    :param distance_x_m: The distance to travel along the X-axis (meters)
    :param distance_y_m: The distance to travel along the Y-axis (meters)
    :param distance_z_m: The distance to travel along the Z-axis (meters)
    :param velocity: the velocity of the motion (meters/second)
    :return:
    """
    distance = math.sqrt(distance_x_m * distance_x_m +
                         distance_y_m * distance_y_m +
                         distance_z_m * distance_z_m)
    # Bug fix: a zero-length move used to divide by zero when computing
    # the per-axis velocities below; treat it as a no-op instead.
    if distance == 0.0:
        return
    flight_time = distance / velocity

    # Split the speed over the axes proportionally to the distances.
    velocity_x = velocity * distance_x_m / distance
    velocity_y = velocity * distance_y_m / distance
    velocity_z = velocity * distance_z_m / distance

    self.start_linear_motion(velocity_x, velocity_y, velocity_z)
    time.sleep(flight_time)
    self.stop()
Start a circular motion to the left. This function returns immediately. :param radius_m: The radius of the circle (meters) :param velocity: The velocity of the motion (meters/second) :return:
def start_circle_left(self, radius_m, velocity=VELOCITY):
    """
    Start a circular motion to the left. This function returns immediately.

    :param radius_m: The radius of the circle (meters)
    :param velocity: The velocity of the motion (meters/second)
    :return:
    """
    # Yaw rate (deg/s) matching the tangential velocity on a circle of
    # this radius; the rate is negated to turn left.
    turn_rate = 360.0 * velocity / (2 * radius_m * math.pi)

    self._set_vel_setpoint(velocity, 0.0, 0.0, -turn_rate)
Start a linear motion. This function returns immediately. positive X is forward positive Y is left positive Z is up :param velocity_x_m: The velocity along the X-axis (meters/second) :param velocity_y_m: The velocity along the Y-axis (meters/second) :param velocity_z_m: The velocity along the Z-axis (meters/second) :return:
def start_linear_motion(self, velocity_x_m, velocity_y_m, velocity_z_m):
    """
    Start a linear motion. This function returns immediately.

    positive X is forward
    positive Y is left
    positive Z is up

    :param velocity_x_m: The velocity along the X-axis (meters/second)
    :param velocity_y_m: The velocity along the Y-axis (meters/second)
    :param velocity_z_m: The velocity along the Z-axis (meters/second)
    :return:
    """
    # Pure translation: the yaw rate component of the setpoint is zero.
    yaw_rate = 0.0
    self._set_vel_setpoint(
        velocity_x_m, velocity_y_m, velocity_z_m, yaw_rate)
Set the velocity setpoint to use for the future motion
def set_vel_setpoint(self, velocity_x, velocity_y, velocity_z, rate_yaw):
    """Set the velocity setpoint to use for the future motion.

    The setpoint is handed to the worker via the internal queue as a
    single (vx, vy, vz, yaw_rate) tuple.
    """
    setpoint = (velocity_x, velocity_y, velocity_z, rate_yaw)
    self._queue.put(setpoint)
This callback is called from the Crazyflie API when a Crazyflie has been connected and the TOCs have been downloaded.
def _connected(self, link_uri):
    """ This callback is called from the Crazyflie API when a Crazyflie
    has been connected and the TOCs have been downloaded."""
    print('Connected to %s' % link_uri)

    one_wire_mems = self._cf.mem.get_mems(MemoryElement.TYPE_1W)
    print('Found {} 1-wire memories'.format(len(one_wire_mems)))

    # This example only erases the first memory found (if any).
    if one_wire_mems:
        first_mem = one_wire_mems[0]
        print('Erasing memory {}'.format(first_mem.id))
        first_mem.erase(self._data_written)
Returns a list of CrazyRadio devices currently connected to the computer
def _find_devices():
    """
    Returns a list of CrazyRadio devices currently connected to the computer
    """
    logger.info('Looking for devices....')

    if pyusb1:
        # pyusb >= 1.0 can filter on vendor/product id directly.
        return [d for d in usb.core.find(idVendor=USB_VID,
                                         idProduct=USB_PID,
                                         find_all=1,
                                         backend=pyusb_backend)]

    # Legacy pyusb 0.x: walk every bus and match vid/pid by hand.
    found = []
    for bus in usb.busses():
        for device in bus.devices:
            if device.idVendor == USB_VID and device.idProduct == USB_PID:
                found.append(device)
    return found
Send a packet and receive the ack from the radio dongle. The ack contains information about the packet transmission and a data payload if the ack packet contained any.
def send_packet(self, dataOut):
    """
    Send a packet and receive the ack from the radio dongle.
    The ack contains information about the packet transmission
    and a data payload if the ack packet contained any.
    """
    try:
        if pyusb1 is not False:
            self.handle.write(endpoint=1, data=dataOut, timeout=20)
        else:
            self.handle.bulkWrite(1, dataOut, 20)
    except usb.USBError:
        # Deliberate best-effort: USB write failures are swallowed here.
        pass
This callback is called from the Crazyflie API when a Crazyflie has been connected and the TOCs have been downloaded.
def _connected(self, link_uri):
    """ This callback is called from the Crazyflie API when a Crazyflie
    has been connected and the TOCs have been downloaded."""
    print('Connected to %s' % link_uri)

    # Walk the parameter TOC in sorted order, printing it and recording
    # every 'group.name' we expect an update for.
    param_toc = self._cf.param.toc.toc
    for group_name in sorted(param_toc):
        print('{}'.format(group_name))
        for param_name in sorted(param_toc[group_name]):
            print('\t{}'.format(param_name))
            self._param_check_list.append(
                '{0}.{1}'.format(group_name, param_name))
        self._param_groups.append('{}'.format(group_name))
        # Register a callback that fires for any parameter in this group
        self._cf.param.add_update_callback(group=group_name, name=None,
                                           cb=self._param_callback)

    # A callback can also target one specific group.name combination
    self._cf.param.add_update_callback(group='cpu', name='flash',
                                       cb=self._cpu_flash_callback)

    print('')
Generic callback registered for all the groups
def _param_callback(self, name, value):
    """Generic callback registered for all the groups"""
    print('{0}: {1}'.format(name, value))

    # Tick this parameter off; once the list is empty every parameter
    # value has been received at least once.
    self._param_check_list.remove(name)
    if not self._param_check_list:
        print('Have fetched all parameter values.')

        # The generic callback is no longer needed for any group
        for group in self._param_groups:
            self._cf.param.remove_update_callback(
                group=group, cb=self._param_callback)

        # Create a new random value [0.00,1.00] for pid_attitude.pitch_kd
        # and write it back to the copter.
        pkd = random.random()
        print('')
        print('Write: pid_attitude.pitch_kd={:.2f}'.format(pkd))
        self._cf.param.add_update_callback(group='pid_attitude',
                                           name='pitch_kd',
                                           cb=self._a_pitch_kd_callback)
        # Setting a value triggers an automatic read-back, which invokes
        # the callback registered above with the updated value.
        self._cf.param.set_value('pid_attitude.pitch_kd',
                                 '{:.2f}'.format(pkd))
Callback for pid_attitude.pitch_kd
def _a_pitch_kd_callback(self, name, value):
    """Callback for pid_attitude.pitch_kd"""
    print('Readback: {0}={1}'.format(name, value))

    # Closing the link ends the example (the app quits afterwards).
    self._cf.close_link()
Connect the link driver to a specified URI of the format: radio://<dongle nbr>/<radio channel>/[250K,1M,2M] The callback for linkQuality can be called at any moment from the driver to report back the link quality in percentage. The callback from linkError will be called when an error occurs, with an error message.
def connect(self, uri, link_quality_callback, link_error_callback):
    """
    Connect the link driver to a specified URI of the format:
    radio://<dongle nbr>/<radio channel>/[250K,1M,2M]

    The callback for linkQuality can be called at any moment from the
    driver to report back the link quality in percentage. The
    callback from linkError will be called when an error occurs with
    an error message.
    """
    # check if the URI is a radio URI
    if not re.search('^radio://', uri):
        raise WrongUriType('Not a radio URI')

    # Open the USB dongle
    if not re.search('^radio://([0-9a-fA-F]+)((/([0-9]+))'
                     '((/(250K|1M|2M))?(/([A-F0-9]+))?)?)?$', uri):
        raise WrongUriType('Wrong radio URI format!')

    # Regex groups: 1 = dongle index or serial, 4 = channel,
    # 7 = data rate, 9 = address as hex digits.
    uri_data = re.search('^radio://([0-9a-fA-F]+)((/([0-9]+))'
                         '((/(250K|1M|2M))?(/([A-F0-9]+))?)?)?$', uri)

    self.uri = uri

    # A short, all-digit first group is a dongle index; anything else is
    # treated as a Crazyradio serial number to look up.
    if len(uri_data.group(1)) < 10 and uri_data.group(1).isdigit():
        devid = int(uri_data.group(1))
    else:
        try:
            devid = crazyradio.get_serials().index(
                uri_data.group(1).upper())
        except ValueError:
            raise Exception('Cannot find radio with serial {}'.format(
                uri_data.group(1)))

    # Channel defaults to 2 when the URI does not specify one.
    channel = 2
    if uri_data.group(4):
        channel = int(uri_data.group(4))

    # Data rate defaults to 2 Mbps.
    datarate = Crazyradio.DR_2MPS
    if uri_data.group(7) == '250K':
        datarate = Crazyradio.DR_250KPS
    if uri_data.group(7) == '1M':
        datarate = Crazyradio.DR_1MPS
    if uri_data.group(7) == '2M':
        datarate = Crazyradio.DR_2MPS

    # NOTE(review): a URI-supplied address must be exactly 10 hex digits
    # (5 bytes); unhexlify/unpack raise for other lengths — confirm
    # callers always pass a full-width address.
    address = DEFAULT_ADDR_A
    if uri_data.group(9):
        addr = str(uri_data.group(9))
        new_addr = struct.unpack('<BBBBB', binascii.unhexlify(addr))
        address = new_addr

    # Only one link may be open per driver instance.
    if self._radio_manager is None:
        self._radio_manager = _RadioManager(devid, channel, datarate,
                                            address)
    else:
        raise Exception('Link already open!')

    with self._radio_manager as cradio:
        if cradio.version >= 0.4:
            cradio.set_arc(_nr_of_arc_retries)
        else:
            logger.warning('Radio version <0.4 will be obsoleted soon!')

    # Prepare the inter-thread communication queue
    self.in_queue = queue.Queue()
    # Limited size out queue to avoid "ReadBack" effect
    self.out_queue = queue.Queue(1)

    # Launch the comm thread
    self._thread = _RadioDriverThread(self._radio_manager,
                                      self.in_queue,
                                      self.out_queue,
                                      link_quality_callback,
                                      link_error_callback,
                                      self)
    self._thread.start()

    self.link_error_callback = link_error_callback
Send the packet pk though the link
def send_packet(self, pk):
    """ Send the packet pk through the link """
    try:
        # Block for at most two seconds; the out queue has size 1, which
        # throttles callers to the radio thread's pace.
        self.out_queue.put(pk, True, 2)
        return
    except queue.Full:
        pass

    # The radio thread did not drain the queue in time.
    if self.link_error_callback:
        self.link_error_callback('RadioDriver: Could not send packet'
                                 ' to copter')
Close the link.
def close(self):
    """ Close the link. """
    # Shut the comm thread down first so nothing touches the radio
    # while we release it.
    self._thread.stop()

    # Release the USB dongle, if we still own one.
    manager = self._radio_manager
    self._radio_manager = None
    if manager:
        manager.close()

    # Drain any packets still waiting to be sent.
    while not self.out_queue.empty():
        self.out_queue.get()

    # Drop the callbacks so no further events are delivered.
    self.link_error_callback = None
    self.link_quality_callback = None
Scan for Crazyflies between the supplied channels.
def _scan_radio_channels(self, cradio, start=0, stop=125):
    """ Scan for Crazyflies between the supplied channels. """
    # scan_channels yields the channels that answered; materialize them
    # into a list for the caller.
    return [channel for channel in
            cradio.scan_channels(start, stop, (0xff,))]
Scan interface for Crazyflies
def scan_interface(self, address):
    """ Scan interface for Crazyflies.

    :param address: 5-byte radio address to scan with, as an int, or
        None to scan with the default address.
    :return: list of [uri, ''] pairs for every Crazyflie found.
    """
    if self._radio_manager is None:
        try:
            self._radio_manager = _RadioManager(0)
        except Exception:
            # No dongle available; nothing to scan.
            return []

    with self._radio_manager as cradio:
        # FIXME: implements serial number in the Crazyradio driver!
        serial = 'N/A'

        logger.info('v%s dongle with serial %s found', cradio.version,
                    serial)
        found = []

        if address is not None:
            # Bug fix: pad to exactly 10 hex digits (5 bytes). A bare
            # '{:X}' yields an odd/short string for small addresses,
            # which makes unhexlify()/unpack() raise.
            addr = '{:010X}'.format(address)
            new_addr = struct.unpack('<BBBBB', binascii.unhexlify(addr))
            cradio.set_address(new_addr)

        cradio.set_arc(1)
        cradio.set_data_rate(cradio.DR_250KPS)

        # Scan all three data rates; URIs include the address only when
        # a non-default one was requested.
        if address is None or address == DEFAULT_ADDR:
            found += [['radio://0/{}/250K'.format(c), '']
                      for c in self._scan_radio_channels(cradio)]
            cradio.set_data_rate(cradio.DR_1MPS)
            found += [['radio://0/{}/1M'.format(c), '']
                      for c in self._scan_radio_channels(cradio)]
            cradio.set_data_rate(cradio.DR_2MPS)
            found += [['radio://0/{}/2M'.format(c), '']
                      for c in self._scan_radio_channels(cradio)]
        else:
            found += [['radio://0/{}/250K/{:X}'.format(c, address), '']
                      for c in self._scan_radio_channels(cradio)]
            cradio.set_data_rate(cradio.DR_1MPS)
            found += [['radio://0/{}/1M/{:X}'.format(c, address), '']
                      for c in self._scan_radio_channels(cradio)]
            cradio.set_data_rate(cradio.DR_2MPS)
            found += [['radio://0/{}/2M/{:X}'.format(c, address), '']
                      for c in self._scan_radio_channels(cradio)]

    # Scanning is a one-shot operation: release the dongle afterwards.
    self._radio_manager.close()
    self._radio_manager = None

    return found
Adds 1bit counter to CRTP header to guarantee that no ack (downlink) payload are lost and no uplink packet are duplicated. The caller should resend packet if not acked (ie. same as with a direct call to crazyradio.send_packet)
def _send_packet_safe(self, cr, packet):
    """
    Adds 1bit counter to CRTP header to guarantee that no ack (downlink)
    payload are lost and no uplink packet are duplicated.
    The caller should resend packet if not acked (ie. same as with a
    direct call to crazyradio.send_packet)
    """
    # Stamp the current up/down sequence bits into header bits 3 and 2.
    packet[0] = (packet[0] & 0xF3) | (self._curr_up << 3) | \
        (self._curr_down << 2)

    resp = cr.send_packet(packet)

    # A downlink payload carrying the expected sequence bit advances the
    # downlink counter.
    if resp and resp.ack and len(resp.data) and \
            (resp.data[0] & 0x04) == (self._curr_down << 2):
        self._curr_down = 1 - self._curr_down
    # Any ack means the uplink packet was delivered: flip its bit.
    if resp and resp.ack:
        self._curr_up = 1 - self._curr_up

    return resp
Run the receiver thread
def run(self):
    """ Run the receiver thread """
    # First packet sent is a NULL header so we can start polling acks.
    dataOut = array.array('B', [0xFF])
    waitTime = 0
    emptyCtr = 0

    # Try up to 10 times to enable the safelink mode
    with self._radio_manager as cradio:
        for _ in range(10):
            resp = cradio.send_packet((0xff, 0x05, 0x01))
            if resp and resp.data and tuple(resp.data) == (
                    0xff, 0x05, 0x01):
                self._has_safelink = True
                self._curr_up = 0
                self._curr_down = 0
                break
    # Without safelink, upper layers must handle resending themselves.
    self._link.needs_resending = not self._has_safelink

    while (True):
        # _sp is the stop flag set by the owning driver.
        if (self._sp):
            break

        with self._radio_manager as cradio:
            try:
                if self._has_safelink:
                    ackStatus = self._send_packet_safe(cradio, dataOut)
                else:
                    ackStatus = cradio.send_packet(dataOut)
            except Exception as e:
                import traceback

                # NOTE(review): if send raised, ackStatus may be unbound
                # below; presumably the error callback tears the link
                # down before that matters — confirm.
                self._link_error_callback(
                    'Error communicating with crazy radio ,it has '
                    'probably been unplugged!\nException:%s\n\n%s' % (
                        e, traceback.format_exc()))

        # Analyse the in data packet ...
        if ackStatus is None:
            logger.info('Dongle reported ACK status == None')
            continue

        if (self._link_quality_callback is not None):
            # track the mean of a sliding window of the last N packets
            retry = 10 - ackStatus.retry
            self._retries.append(retry)
            self._retry_sum += retry
            if len(self._retries) > 100:
                self._retry_sum -= self._retries.popleft()
            link_quality = float(self._retry_sum) / len(self._retries) * 10
            self._link_quality_callback(link_quality)

        # If no copter, retry
        if ackStatus.ack is False:
            self._retry_before_disconnect = \
                self._retry_before_disconnect - 1
            if (self._retry_before_disconnect == 0 and
                    self._link_error_callback is not None):
                self._link_error_callback('Too many packets lost')
            continue
        # Contact re-established: reset the disconnect countdown.
        self._retry_before_disconnect = _nr_of_retries

        data = ackStatus.data

        # If there is a copter in range, the packet is analysed and the
        # next packet to send is prepared
        if (len(data) > 0):
            inPacket = CRTPPacket(data[0], list(data[1:]))
            self._in_queue.put(inPacket)
            waitTime = 0
            emptyCtr = 0
        else:
            emptyCtr += 1
            if (emptyCtr > 10):
                emptyCtr = 10
                # Relaxation time if the last 10 packet where empty
                waitTime = 0.01
            else:
                waitTime = 0

        # get the next packet to send of relaxation (wait 10ms)
        outPacket = None
        try:
            outPacket = self._out_queue.get(True, waitTime)
        except queue.Empty:
            outPacket = None

        dataOut = array.array('B')

        if outPacket:
            dataOut.append(outPacket.header)
            for X in outPacket.data:
                # Packet payload may hold ints or single characters.
                if type(X) == int:
                    dataOut.append(X)
                else:
                    dataOut.append(ord(X))
        else:
            # Nothing to send: poll with a NULL packet to fetch downlink
            # data.
            dataOut.append(0xFF)
This callback is called from the Crazyflie API when a Crazyflie has been connected and the TOCs have been downloaded.
def _connected(self, link_uri):
    """ This callback is called from the Crazyflie API when a Crazyflie
    has been connected and the TOCs have been downloaded."""
    print('Connected to %s' % link_uri)

    mems = self._cf.mem.get_mems(MemoryElement.TYPE_1W)
    print('Found {} 1-wire memories'.format(len(mems)))
    if not mems:
        return

    target = mems[0]
    print('Writing test configuration to'
          ' memory {}'.format(target.id))

    # Example vendor/product ids for the test configuration.
    target.vid = 0xBC
    target.pid = 0xFF

    # Look up the element ids of the human-readable fields.
    board_name_id = OWElement.element_mapping[1]
    board_rev_id = OWElement.element_mapping[2]
    target.elements[board_name_id] = 'Test board'
    target.elements[board_rev_id] = 'A'

    target.write_data(self._data_written)
Command-line tool for obtaining authorization and credentials from a user. This tool uses the OAuth 2.0 Authorization Code grant as described in section 1.3.1 of RFC6749: https://tools.ietf.org/html/rfc6749#section-1.3.1 This tool is intended to assist developers in obtaining credentials for testing applications where it may not be possible or easy to run a complete OAuth 2.0 authorization flow, especially in the case of code samples or embedded devices without input / display capabilities. This is not intended for production use, where a combination of companion and on-device applications should complete the OAuth 2.0 authorization flow to get authorization from the users.
def main(client_secrets, scope, save, credentials, headless):
    """Command-line tool for obtaining authorization and credentials from
    a user.

    This tool uses the OAuth 2.0 Authorization Code grant as described
    in section 1.3.1 of RFC6749:
    https://tools.ietf.org/html/rfc6749#section-1.3.1

    This tool is intended to assist developers in obtaining credentials
    for testing applications where it may not be possible or easy to run a
    complete OAuth 2.0 authorization flow, especially in the case of code
    samples or embedded devices without input / display capabilities.

    This is not intended for production use where a combination of
    companion and on-device applications should complete the OAuth 2.0
    authorization flow to get authorization from the users.
    """
    flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
        client_secrets,
        scopes=scope
    )

    # Headless environments cannot open a browser; fall back to the
    # copy/paste console flow.
    if headless:
        creds = flow.run_console()
    else:
        creds = flow.run_local_server()

    creds_data = {
        'token': creds.token,
        'refresh_token': creds.refresh_token,
        'token_uri': creds.token_uri,
        'client_id': creds.client_id,
        'client_secret': creds.client_secret,
        'scopes': creds.scopes
    }

    if not save:
        click.echo(json.dumps(creds_data))
        return

    # The access token is short-lived; persist only the refresh material.
    del creds_data['token']
    config_path = os.path.dirname(credentials)
    if config_path and not os.path.isdir(config_path):
        os.makedirs(config_path)
    with open(credentials, 'w') as outfile:
        json.dump(creds_data, outfile)
    click.echo('credentials saved: %s' % credentials)
Creates a :class:`requests_oauthlib.OAuth2Session` from client configuration loaded from a Google-format client secrets file. Args: client_config (Mapping[str, Any]): The client configuration in the Google `client secrets`_ format. scopes (Sequence[str]): The list of scopes to request during the flow. kwargs: Any additional parameters passed to :class:`requests_oauthlib.OAuth2Session` Raises: ValueError: If the client configuration is not in the correct format. Returns: Tuple[requests_oauthlib.OAuth2Session, Mapping[str, Any]]: The new oauthlib session and the validated client configuration. .. _client secrets: https://developers.google.com/api-client-library/python/guide /aaa_client_secrets
def session_from_client_config(client_config, scopes, **kwargs):
    """Creates a :class:`requests_oauthlib.OAuth2Session` from client
    configuration loaded from a Google-format client secrets file.

    Args:
        client_config (Mapping[str, Any]): The client configuration in the
            Google `client secrets`_ format.
        scopes (Sequence[str]): The list of scopes to request during the
            flow.
        kwargs: Any additional parameters passed to
            :class:`requests_oauthlib.OAuth2Session`

    Raises:
        ValueError: If the client configuration is not in the correct
            format.

    Returns:
        Tuple[requests_oauthlib.OAuth2Session, Mapping[str, Any]]: The new
            oauthlib session and the validated client configuration.

    .. _client secrets:
        https://developers.google.com/api-client-library/python/guide
        /aaa_client_secrets
    """
    # A client secrets file carries its config under either a 'web' or an
    # 'installed' top-level key; 'web' wins if both are present.
    for client_type in ('web', 'installed'):
        if client_type in client_config:
            config = client_config[client_type]
            break
    else:
        raise ValueError(
            'Client secrets must be for a web or installed app.')

    if not _REQUIRED_CONFIG_KEYS.issubset(config.keys()):
        raise ValueError('Client secrets is not in the correct format.')

    session = requests_oauthlib.OAuth2Session(
        client_id=config['client_id'],
        scope=scopes,
        **kwargs)

    return session, client_config
Creates a :class:`requests_oauthlib.OAuth2Session` instance from a Google-format client secrets file. Args: client_secrets_file (str): The path to the `client secrets`_ .json file. scopes (Sequence[str]): The list of scopes to request during the flow. kwargs: Any additional parameters passed to :class:`requests_oauthlib.OAuth2Session` Returns: Tuple[requests_oauthlib.OAuth2Session, Mapping[str, Any]]: The new oauthlib session and the validated client configuration. .. _client secrets: https://developers.google.com/api-client-library/python/guide /aaa_client_secrets
def session_from_client_secrets_file(client_secrets_file, scopes, **kwargs):
    """Creates a :class:`requests_oauthlib.OAuth2Session` instance from a
    Google-format client secrets file.

    Args:
        client_secrets_file (str): The path to the `client secrets`_
            .json file.
        scopes (Sequence[str]): The list of scopes to request during the
            flow.
        kwargs: Any additional parameters passed to
            :class:`requests_oauthlib.OAuth2Session`

    Returns:
        Tuple[requests_oauthlib.OAuth2Session, Mapping[str, Any]]: The new
            oauthlib session and the validated client configuration.

    .. _client secrets:
        https://developers.google.com/api-client-library/python/guide
        /aaa_client_secrets
    """
    # Read the file and delegate validation to the config-based helper.
    with open(client_secrets_file, 'r') as json_file:
        config_text = json_file.read()
    client_config = json.loads(config_text)
    return session_from_client_config(client_config, scopes, **kwargs)
Creates :class:`google.oauth2.credentials.Credentials` from a :class:`requests_oauthlib.OAuth2Session`. :meth:`fetch_token` must be called on the session before before calling this. This uses the session's auth token and the provided client configuration to create :class:`google.oauth2.credentials.Credentials`. This allows you to use the credentials from the session with Google API client libraries. Args: session (requests_oauthlib.OAuth2Session): The OAuth 2.0 session. client_config (Mapping[str, Any]): The subset of the client configuration to use. For example, if you have a web client you would pass in `client_config['web']`. Returns: google.oauth2.credentials.Credentials: The constructed credentials. Raises: ValueError: If there is no access token in the session.
def credentials_from_session(session, client_config=None):
    """Creates :class:`google.oauth2.credentials.Credentials` from a
    :class:`requests_oauthlib.OAuth2Session`.

    :meth:`fetch_token` must be called on the session before calling
    this. This uses the session's auth token and the provided client
    configuration to create :class:`google.oauth2.credentials.Credentials`.
    This allows you to use the credentials from the session with Google
    API client libraries.

    Args:
        session (requests_oauthlib.OAuth2Session): The OAuth 2.0 session.
        client_config (Mapping[str, Any]): The subset of the client
            configuration to use. For example, if you have a web client
            you would pass in `client_config['web']`.

    Returns:
        google.oauth2.credentials.Credentials: The constructed
            credentials.

    Raises:
        ValueError: If there is no access token in the session.
    """
    if client_config is None:
        client_config = {}

    if not session.token:
        raise ValueError(
            'There is no access token for this session, did you call '
            'fetch_token?')

    token = session.token
    credentials = google.oauth2.credentials.Credentials(
        token['access_token'],
        refresh_token=token.get('refresh_token'),
        id_token=token.get('id_token'),
        token_uri=client_config.get('token_uri'),
        client_id=client_config.get('client_id'),
        client_secret=client_config.get('client_secret'),
        scopes=session.scope)
    # requests-oauthlib stores expiry as a unix timestamp; convert to the
    # datetime object google-auth expects.
    credentials.expiry = datetime.datetime.utcfromtimestamp(
        token['expires_at'])
    return credentials
Creates a :class:`requests_oauthlib.OAuth2Session` from client configuration loaded from a Google-format client secrets file. Args: client_config (Mapping[str, Any]): The client configuration in the Google `client secrets`_ format. scopes (Sequence[str]): The list of scopes to request during the flow. kwargs: Any additional parameters passed to :class:`requests_oauthlib.OAuth2Session` Returns: Flow: The constructed Flow instance. Raises: ValueError: If the client configuration is not in the correct format. .. _client secrets: https://developers.google.com/api-client-library/python/guide /aaa_client_secrets
def from_client_config(cls, client_config, scopes, **kwargs):
    """Creates a :class:`Flow` from client configuration loaded from a
    Google-format client secrets file.

    Args:
        client_config (Mapping[str, Any]): The client configuration in
            the Google `client secrets`_ format.
        scopes (Sequence[str]): The list of scopes to request during the
            flow.
        kwargs: Any additional parameters passed to
            :class:`requests_oauthlib.OAuth2Session`

    Returns:
        Flow: The constructed Flow instance.

    Raises:
        ValueError: If the client configuration is not in the correct
            format.

    .. _client secrets:
        https://developers.google.com/api-client-library/python/guide
        /aaa_client_secrets
    """
    # Determine whether this is a 'web' or 'installed' app config;
    # 'web' takes precedence when both keys exist.
    client_type = next(
        (t for t in ('web', 'installed') if t in client_config), None)
    if client_type is None:
        raise ValueError(
            'Client secrets must be for a web or installed app.')

    session, client_config = (
        google_auth_oauthlib.helpers.session_from_client_config(
            client_config, scopes, **kwargs))

    redirect_uri = kwargs.get('redirect_uri', None)

    return cls(session, client_type, client_config, redirect_uri)
Creates a :class:`Flow` instance from a Google client secrets file. Args: client_secrets_file (str): The path to the client secrets .json file. scopes (Sequence[str]): The list of scopes to request during the flow. kwargs: Any additional parameters passed to :class:`requests_oauthlib.OAuth2Session` Returns: Flow: The constructed Flow instance.
def from_client_secrets_file(cls, client_secrets_file, scopes, **kwargs):
    """Creates a :class:`Flow` instance from a Google client secrets file.

    Args:
        client_secrets_file (str): The path to the client secrets .json
            file.
        scopes (Sequence[str]): The list of scopes to request during the
            flow.
        kwargs: Any additional parameters passed to
            :class:`requests_oauthlib.OAuth2Session`

    Returns:
        Flow: The constructed Flow instance.
    """
    # Parse the secrets file, then defer to the config-based constructor.
    with open(client_secrets_file, 'r') as json_file:
        client_config = json.load(json_file)

    return cls.from_client_config(client_config, scopes=scopes, **kwargs)
Generates an authorization URL. This is the first step in the OAuth 2.0 Authorization Flow. The user's browser should be redirected to the returned URL. This method calls :meth:`requests_oauthlib.OAuth2Session.authorization_url` and specifies the client configuration's authorization URI (usually Google's authorization server) and specifies that "offline" access is desired. This is required in order to obtain a refresh token. Args: kwargs: Additional arguments passed through to :meth:`requests_oauthlib.OAuth2Session.authorization_url` Returns: Tuple[str, str]: The generated authorization URL and state. The user must visit the URL to complete the flow. The state is used when completing the flow to verify that the request originated from your application. If your application is using a different :class:`Flow` instance to obtain the token, you will need to specify the ``state`` when constructing the :class:`Flow`.
def authorization_url(self, **kwargs):
    """Generates an authorization URL.

    This is the first step in the OAuth 2.0 Authorization Flow. The
    user's browser should be redirected to the returned URL.

    This method calls
    :meth:`requests_oauthlib.OAuth2Session.authorization_url` and
    specifies the client configuration's authorization URI (usually
    Google's authorization server) and specifies that "offline" access is
    desired. This is required in order to obtain a refresh token.

    Args:
        kwargs: Additional arguments passed through to
            :meth:`requests_oauthlib.OAuth2Session.authorization_url`

    Returns:
        Tuple[str, str]: The generated authorization URL and state. The
            user must visit the URL to complete the flow. The state is
            used when completing the flow to verify that the request
            originated from your application. If your application is
            using a different :class:`Flow` instance to obtain the token,
            you will need to specify the ``state`` when constructing the
            :class:`Flow`.
    """
    # Default to offline access so a refresh token is issued; callers
    # may override this explicitly.
    kwargs.setdefault('access_type', 'offline')
    return self.oauth2session.authorization_url(
        self.client_config['auth_uri'], **kwargs)
Completes the Authorization Flow and obtains an access token. This is the final step in the OAuth 2.0 Authorization Flow. This is called after the user consents. This method calls :meth:`requests_oauthlib.OAuth2Session.fetch_token` and specifies the client configuration's token URI (usually Google's token server). Args: kwargs: Arguments passed through to :meth:`requests_oauthlib.OAuth2Session.fetch_token`. At least one of ``code`` or ``authorization_response`` must be specified. Returns: Mapping[str, str]: The obtained tokens. Typically, you will not use return value of this function and instead and use :meth:`credentials` to obtain a :class:`~google.auth.credentials.Credentials` instance.
def fetch_token(self, **kwargs):
    """Completes the Authorization Flow and obtains an access token.

    This is the final step in the OAuth 2.0 Authorization Flow. This is
    called after the user consents.

    This method calls
    :meth:`requests_oauthlib.OAuth2Session.fetch_token` and specifies the
    client configuration's token URI (usually Google's token server).

    Args:
        kwargs: Arguments passed through to
            :meth:`requests_oauthlib.OAuth2Session.fetch_token`. At least
            one of ``code`` or ``authorization_response`` must be
            specified.

    Returns:
        Mapping[str, str]: The obtained tokens. Typically, you will not
            use the return value of this function and instead use
            :meth:`credentials` to obtain a
            :class:`~google.auth.credentials.Credentials` instance.
    """
    # The token endpoint requires the client secret; supply it unless
    # the caller already provided one.
    if 'client_secret' not in kwargs:
        kwargs['client_secret'] = self.client_config['client_secret']
    token_endpoint = self.client_config['token_uri']
    return self.oauth2session.fetch_token(token_endpoint, **kwargs)
Run the flow using the console strategy. The console strategy instructs the user to open the authorization URL in their browser. Once the authorization is complete the authorization server will give the user a code. The user then must copy & paste this code into the application. The code is then exchanged for a token. Args: authorization_prompt_message (str): The message to display to tell the user to navigate to the authorization URL. authorization_code_message (str): The message to display when prompting the user for the authorization code. kwargs: Additional keyword arguments passed through to :meth:`authorization_url`. Returns: google.oauth2.credentials.Credentials: The OAuth 2.0 credentials for the user.
def run_console(
        self,
        authorization_prompt_message=_DEFAULT_AUTH_PROMPT_MESSAGE,
        authorization_code_message=_DEFAULT_AUTH_CODE_MESSAGE,
        **kwargs):
    """Run the flow using the console strategy.

    The console strategy instructs the user to open the authorization URL
    in their browser. Once the authorization is complete the
    authorization server will give the user a code. The user then must
    copy & paste this code into the application. The code is then
    exchanged for a token.

    Args:
        authorization_prompt_message (str): The message to display to tell
            the user to navigate to the authorization URL.
        authorization_code_message (str): The message to display when
            prompting the user for the authorization code.
        kwargs: Additional keyword arguments passed through to
            :meth:`authorization_url`.

    Returns:
        google.oauth2.credentials.Credentials: The OAuth 2.0 credentials
            for the user.
    """
    kwargs.setdefault('prompt', 'consent')

    # Out-of-band redirect: the auth server displays the code to the
    # user instead of redirecting a browser back to us.
    self.redirect_uri = self._OOB_REDIRECT_URI

    auth_url, _ = self.authorization_url(**kwargs)
    print(authorization_prompt_message.format(url=auth_url))

    code = input(authorization_code_message)
    self.fetch_token(code=code)

    return self.credentials
Run the flow using the server strategy. The server strategy instructs the user to open the authorization URL in their browser and will attempt to automatically open the URL for them. It will start a local web server to listen for the authorization response. Once authorization is complete the authorization server will redirect the user's browser to the local web server. The web server will get the authorization code from the response and shutdown. The code is then exchanged for a token. Args: host (str): The hostname for the local redirect server. This will be served over http, not https. port (int): The port for the local redirect server. authorization_prompt_message (str): The message to display to tell the user to navigate to the authorization URL. success_message (str): The message to display in the web browser the authorization flow is complete. open_browser (bool): Whether or not to open the authorization URL in the user's browser. kwargs: Additional keyword arguments passed through to :meth:`authorization_url`. Returns: google.oauth2.credentials.Credentials: The OAuth 2.0 credentials for the user.
def run_local_server(
        self, host='localhost', port=8080,
        authorization_prompt_message=_DEFAULT_AUTH_PROMPT_MESSAGE,
        success_message=_DEFAULT_WEB_SUCCESS_MESSAGE,
        open_browser=True, **kwargs):
    """Run the flow using the server strategy.

    The server strategy instructs the user to open the authorization URL in
    their browser and will attempt to automatically open the URL for them.
    It will start a local web server to listen for the authorization
    response. Once authorization is complete the authorization server will
    redirect the user's browser to the local web server. The web server
    will get the authorization code from the response and shutdown. The
    code is then exchanged for a token.

    Args:
        host (str): The hostname for the local redirect server. This will
            be served over http, not https.
        port (int): The port for the local redirect server.
        authorization_prompt_message (str): The message to display to tell
            the user to navigate to the authorization URL.
        success_message (str): The message to display in the web browser
            the authorization flow is complete.
        open_browser (bool): Whether or not to open the authorization URL
            in the user's browser.
        kwargs: Additional keyword arguments passed through to
            :meth:`authorization_url`.

    Returns:
        google.oauth2.credentials.Credentials: The OAuth 2.0 credentials
            for the user.
    """
    self.redirect_uri = 'http://{}:{}/'.format(host, port)
    auth_url, _ = self.authorization_url(**kwargs)

    wsgi_app = _RedirectWSGIApp(success_message)
    local_server = wsgiref.simple_server.make_server(
        host, port, wsgi_app, handler_class=_WSGIRequestHandler)

    if open_browser:
        webbrowser.open(auth_url, new=1, autoraise=True)

    print(authorization_prompt_message.format(url=auth_url))

    try:
        # Serve exactly one request: the OAuth redirect.
        local_server.handle_request()
    finally:
        # Release the listening socket even if the user aborts the flow
        # (e.g. Ctrl-C) while blocked in handle_request(); otherwise the
        # port stays bound for the life of the process.
        local_server.server_close()

    # Note: using https here because oauthlib is very picky that
    # OAuth 2.0 should only occur over https.
    # Replace only the scheme (count=1): a bare .replace() would also
    # rewrite any literal 'http' substring appearing later in the
    # redirect URI, e.g. inside the state or a query parameter.
    authorization_response = wsgi_app.last_request_uri.replace(
        'http', 'https', 1)
    self.fetch_token(authorization_response=authorization_response)

    return self.credentials
Install an opener. Arguments: opener (`Opener`): an `Opener` instance, or a callable that returns an opener instance. Note: May be used as a class decorator. For example:: registry = Registry() @registry.install class ArchiveOpener(Opener): protocols = ['zip', 'tar']
def install(self, opener):
    # type: (Union[Type[Opener], Opener, Callable[[], Opener]]) -> None
    """Install an opener.

    Arguments:
        opener (`Opener`): an `Opener` instance, or a callable that
            returns an opener instance.

    Note:
        May be used as a class decorator. For example::

            registry = Registry()
            @registry.install
            class ArchiveOpener(Opener):
                protocols = ['zip', 'tar']

    """
    # Accept either a ready-made instance or a factory/class.
    if isinstance(opener, Opener):
        instance = opener
    else:
        instance = opener()
    assert isinstance(instance, Opener), "Opener instance required"
    assert instance.protocols, "must list one or more protocols"
    # Register the same instance under every protocol it advertises.
    self._protocols.update(
        (protocol, instance) for protocol in instance.protocols
    )
    # Return the original argument so this works as a class decorator.
    return opener
`list`: the list of supported protocols.
def protocols(self):
    # type: () -> List[Text]
    """`list`: the list of supported protocols.

    """
    names = list(self._protocols)
    if self.load_extern:
        # Also include openers advertised by installed extensions.
        names.extend(
            entry_point.name
            for entry_point in pkg_resources.iter_entry_points("fs.opener")
        )
    # De-duplicate while preserving first-seen order.
    return list(collections.OrderedDict.fromkeys(names))
Get the opener class associated to a given protocol. Arguments: protocol (str): A filesystem protocol. Returns: Opener: an opener instance. Raises: ~fs.opener.errors.UnsupportedProtocol: If no opener could be found for the given protocol. EntryPointLoadingError: If the returned entry point is not an `Opener` subclass or could not be loaded successfully.
def get_opener(self, protocol):
    # type: (Text) -> Opener
    """Get the opener class associated to a given protocol.

    Arguments:
        protocol (str): A filesystem protocol.

    Returns:
        Opener: an opener instance.

    Raises:
        ~fs.opener.errors.UnsupportedProtocol: If no opener
            could be found for the given protocol.
        EntryPointLoadingError: If the returned entry point is not an
            `Opener` subclass or could not be loaded successfully.

    """
    # An empty/None protocol falls back to the registry default.
    protocol = protocol or self.default_opener

    if self.load_extern:
        # Extensions (setuptools entry points) take precedence over
        # openers registered directly on this registry.
        entry_point = next(
            pkg_resources.iter_entry_points("fs.opener", protocol), None
        )
    else:
        entry_point = None

    # If no entry point was loaded from the extensions, try looking
    # into the registered protocols
    if entry_point is None:
        if protocol in self._protocols:
            opener_instance = self._protocols[protocol]
        else:
            raise UnsupportedProtocol(
                "protocol '{}' is not supported".format(protocol)
            )

    # If an entry point was found in an extension, attempt to load it
    else:
        try:
            opener = entry_point.load()
        except Exception as exception:
            # Broad catch is deliberate: entry_point.load() can fail in
            # arbitrary ways inside third-party code.
            raise EntryPointError(
                "could not load entry point; {}".format(exception)
            )
        if not issubclass(opener, Opener):
            raise EntryPointError("entry point did not return an opener")

        try:
            # Entry points hand back the class; we need an instance.
            opener_instance = opener()
        except Exception as exception:
            raise EntryPointError(
                "could not instantiate opener; {}".format(exception)
            )

    return opener_instance
Open a filesystem from a FS URL. Returns a tuple of a filesystem object and a path. If there is no path in the FS URL, the path value will be `None`. Arguments: fs_url (str): A filesystem URL. writeable (bool, optional): `True` if the filesystem must be writeable. create (bool, optional): `True` if the filesystem should be created if it does not exist. cwd (str): The current working directory. Returns: (FS, str): a tuple of ``(<filesystem>, <path from url>)``
def open(
    self,
    fs_url,  # type: Text
    writeable=True,  # type: bool
    create=False,  # type: bool
    cwd=".",  # type: Text
    default_protocol="osfs",  # type: Text
):
    # type: (...) -> Tuple[FS, Text]
    """Open a filesystem from a FS URL.

    Returns a tuple of a filesystem object and a path. If there is
    no path in the FS URL, the path value will be `None`.

    Arguments:
        fs_url (str): A filesystem URL.
        writeable (bool, optional): `True` if the filesystem must be
            writeable.
        create (bool, optional): `True` if the filesystem should be
            created if it does not exist.
        cwd (str): The current working directory.

    Returns:
        (FS, str): a tuple of ``(<filesystem>, <path from url>)``

    """
    if "://" not in fs_url:
        # No scheme given; treat the URL as a plain path under the
        # default protocol.
        fs_url = "{}://{}".format(default_protocol, fs_url)

    parse_result = parse_fs_url(fs_url)
    opener = self.get_opener(parse_result.protocol)
    open_fs = opener.open_fs(fs_url, parse_result, writeable, create, cwd)
    return open_fs, parse_result.path
Open a filesystem from a FS URL (ignoring the path component). Arguments: fs_url (str): A filesystem URL. writeable (bool, optional): `True` if the filesystem must be writeable. create (bool, optional): `True` if the filesystem should be created if it does not exist. cwd (str): The current working directory (generally only relevant for OS filesystems). default_protocol (str): The protocol to use if one is not supplied in the FS URL (defaults to ``"osfs"``). Returns: ~fs.base.FS: A filesystem instance.
def open_fs(
    self,
    fs_url,  # type: Union[FS, Text]
    writeable=False,  # type: bool
    create=False,  # type: bool
    cwd=".",  # type: Text
    default_protocol="osfs",  # type: Text
):
    # type: (...) -> FS
    """Open a filesystem from a FS URL (ignoring the path component).

    Arguments:
        fs_url (str): A filesystem URL.
        writeable (bool, optional): `True` if the filesystem must
            be writeable.
        create (bool, optional): `True` if the filesystem should be
            created if it does not exist.
        cwd (str): The current working directory (generally only
            relevant for OS filesystems).
        default_protocol (str): The protocol to use if one is not
            supplied in the FS URL (defaults to ``"osfs"``).

    Returns:
        ~fs.base.FS: A filesystem instance.

    """
    from ..base import FS

    # An FS instance passes straight through unchanged.
    if isinstance(fs_url, FS):
        return fs_url

    _fs, _ = self.open(
        fs_url,
        writeable=writeable,
        create=create,
        cwd=cwd,
        default_protocol=default_protocol,
    )
    return _fs
Get a context manager to open and close a filesystem. Arguments: fs_url (FS or str): A filesystem instance or a FS URL. create (bool, optional): If `True`, then create the filesystem if it doesn't already exist. writeable (bool, optional): If `True`, then the filesystem must be writeable. cwd (str): The current working directory, if opening a `~fs.osfs.OSFS`. Sometimes it is convenient to be able to pass either a FS object *or* an FS URL to a function. This context manager handles the required logic for that. Example: >>> def print_ls(list_fs): ... '''List a directory.''' ... with manage_fs(list_fs) as fs: ... print(' '.join(fs.listdir())) This function may be used in two ways. You may either pass a ``str``, as follows:: >>> print_list('zip://projects.zip') Or, an filesystem instance:: >>> from fs.osfs import OSFS >>> projects_fs = OSFS('~/') >>> print_list(projects_fs)
def manage_fs(
    self,
    fs_url,  # type: Union[FS, Text]
    create=False,  # type: bool
    writeable=False,  # type: bool
    cwd=".",  # type: Text
):
    # type: (...) -> Iterator[FS]
    """Get a context manager to open and close a filesystem.

    Arguments:
        fs_url (FS or str): A filesystem instance or a FS URL.
        create (bool, optional): If `True`, then create the filesystem if
            it doesn't already exist.
        writeable (bool, optional): If `True`, then the filesystem
            must be writeable.
        cwd (str): The current working directory, if opening a
            `~fs.osfs.OSFS`.

    Sometimes it is convenient to be able to pass either a FS object
    *or* an FS URL to a function. This context manager handles the
    required logic for that.

    Example:
        >>> def print_ls(list_fs):
        ...     '''List a directory.'''
        ...     with manage_fs(list_fs) as fs:
        ...         print(' '.join(fs.listdir()))

        This function may be used in two ways. You may either pass
        a ``str``, as follows::

            >>> print_list('zip://projects.zip')

        Or, a filesystem instance::

            >>> from fs.osfs import OSFS
            >>> projects_fs = OSFS('~/')
            >>> print_list(projects_fs)

    """
    from ..base import FS

    if isinstance(fs_url, FS):
        # Caller owns the instance; don't close it on exit.
        yield fs_url
    else:
        # We opened it from a URL, so we are responsible for closing it.
        _fs = self.open_fs(fs_url, create=create, writeable=writeable, cwd=cwd)
        try:
            yield _fs
        finally:
            # The original wrapped this in a bare ``except: raise``,
            # which is a no-op; try/finally alone guarantees the close.
            _fs.close()
Copy the contents of one filesystem to another. Arguments: src_fs (FS or str): Source filesystem (URL or instance). dst_fs (FS or str): Destination filesystem (URL or instance). walker (~fs.walk.Walker, optional): A walker object that will be used to scan for files in ``src_fs``. Set this if you only want to consider a sub-set of the resources in ``src_fs``. on_copy (callable): A function callback called after a single file copy is executed. Expected signature is ``(src_fs, src_path, dst_fs, dst_path)``. workers (int): Use `worker` threads to copy data, or ``0`` (default) for a single-threaded copy.
def copy_fs(
    src_fs,  # type: Union[FS, Text]
    dst_fs,  # type: Union[FS, Text]
    walker=None,  # type: Optional[Walker]
    on_copy=None,  # type: Optional[_OnCopy]
    workers=0,  # type: int
):
    # type: (...) -> None
    """Copy the contents of one filesystem to another.

    Arguments:
        src_fs (FS or str): Source filesystem (URL or instance).
        dst_fs (FS or str): Destination filesystem (URL or instance).
        walker (~fs.walk.Walker, optional): A walker object that will be
            used to scan for files in ``src_fs``. Set this if you only want
            to consider a sub-set of the resources in ``src_fs``.
        on_copy (callable): A function callback called after a single file
            copy is executed. Expected signature is ``(src_fs, src_path,
            dst_fs, dst_path)``.
        workers (int): Use `worker` threads to copy data, or ``0`` (default)
            for a single-threaded copy.

    """
    # Copying an entire filesystem is just copying its root directory.
    return copy_dir(
        src_fs,
        "/",
        dst_fs,
        "/",
        walker=walker,
        on_copy=on_copy,
        workers=workers,
    )
Copy the contents of one filesystem to another, checking times. If both source and destination files exist, the copy is executed only if the source file is newer than the destination file. In case modification times of source or destination files are not available, copy file is always executed. Arguments: src_fs (FS or str): Source filesystem (URL or instance). dst_fs (FS or str): Destination filesystem (URL or instance). walker (~fs.walk.Walker, optional): A walker object that will be used to scan for files in ``src_fs``. Set this if you only want to consider a sub-set of the resources in ``src_fs``. on_copy (callable):A function callback called after a single file copy is executed. Expected signature is ``(src_fs, src_path, dst_fs, dst_path)``. workers (int): Use ``worker`` threads to copy data, or ``0`` (default) for a single-threaded copy.
def copy_fs_if_newer(
    src_fs,  # type: Union[FS, Text]
    dst_fs,  # type: Union[FS, Text]
    walker=None,  # type: Optional[Walker]
    on_copy=None,  # type: Optional[_OnCopy]
    workers=0,  # type: int
):
    # type: (...) -> None
    """Copy the contents of one filesystem to another, checking times.

    If both source and destination files exist, the copy is executed only
    if the source file is newer than the destination file. In case
    modification times of source or destination files are not available,
    copy file is always executed.

    Arguments:
        src_fs (FS or str): Source filesystem (URL or instance).
        dst_fs (FS or str): Destination filesystem (URL or instance).
        walker (~fs.walk.Walker, optional): A walker object that will be
            used to scan for files in ``src_fs``. Set this if you only want
            to consider a sub-set of the resources in ``src_fs``.
        on_copy (callable): A function callback called after a single file
            copy is executed. Expected signature is ``(src_fs, src_path,
            dst_fs, dst_path)``.
        workers (int): Use ``worker`` threads to copy data, or ``0``
            (default) for a single-threaded copy.

    """
    # Whole-filesystem variant of copy_dir_if_newer, rooted at "/".
    return copy_dir_if_newer(
        src_fs,
        "/",
        dst_fs,
        "/",
        walker=walker,
        on_copy=on_copy,
        workers=workers,
    )
Determine if source file is newer than destination file. Arguments: src_fs (FS): Source filesystem (instance or URL). src_path (str): Path to a file on the source filesystem. dst_fs (FS): Destination filesystem (instance or URL). dst_path (str): Path to a file on the destination filesystem. Returns: bool: `True` if the source file is newer than the destination file or file modification time cannot be determined, `False` otherwise.
def _source_is_newer(src_fs, src_path, dst_fs, dst_path):
    # type: (FS, Text, FS, Text) -> bool
    """Determine if source file is newer than destination file.

    Arguments:
        src_fs (FS): Source filesystem (instance or URL).
        src_path (str): Path to a file on the source filesystem.
        dst_fs (FS): Destination filesystem (instance or URL).
        dst_path (str): Path to a file on the destination filesystem.

    Returns:
        bool: `True` if the source file is newer than the destination
            file or file modification time cannot be determined, `False`
            otherwise.

    """
    try:
        if not dst_fs.exists(dst_path):
            # No destination file: always treat the source as newer.
            return True
        namespaces = ("details", "modified")
        src_modified = src_fs.getinfo(src_path, namespaces).modified
        if src_modified is None:
            # Unknown source mtime: err on the side of copying.
            return True
        dst_modified = dst_fs.getinfo(dst_path, namespaces).modified
        return dst_modified is None or src_modified > dst_modified
    except FSError:  # pragma: no cover
        # todo: should log something here
        return True
Copy a file from one filesystem to another. If the destination exists, and is a file, it will be first truncated. Arguments: src_fs (FS or str): Source filesystem (instance or URL). src_path (str): Path to a file on the source filesystem. dst_fs (FS or str): Destination filesystem (instance or URL). dst_path (str): Path to a file on the destination filesystem.
def copy_file(
    src_fs,  # type: Union[FS, Text]
    src_path,  # type: Text
    dst_fs,  # type: Union[FS, Text]
    dst_path,  # type: Text
):
    # type: (...) -> None
    """Copy a file from one filesystem to another.

    If the destination exists, and is a file, it will be first truncated.

    Arguments:
        src_fs (FS or str): Source filesystem (instance or URL).
        src_path (str): Path to a file on the source filesystem.
        dst_fs (FS or str): Destination filesystem (instance or URL).
        dst_path (str): Path to a file on the destination filesystem.

    """
    with manage_fs(src_fs, writeable=False) as _src_fs:
        with manage_fs(dst_fs, create=True) as _dst_fs:
            if _src_fs is _dst_fs:
                # Same filesystem, so we can do a potentially optimized
                # copy
                _src_fs.copy(src_path, dst_path, overwrite=True)
            else:
                # Standard copy
                with _src_fs.lock(), _dst_fs.lock():
                    if _dst_fs.hassyspath(dst_path):
                        # Destination is backed by a real OS file: open it
                        # for writing and let the source download into the
                        # handle directly.
                        with _dst_fs.openbin(dst_path, "w") as write_file:
                            _src_fs.download(src_path, write_file)
                    else:
                        # Otherwise stream the source file and upload it
                        # through the destination filesystem's API.
                        with _src_fs.openbin(src_path) as read_file:
                            _dst_fs.upload(dst_path, read_file)
Low level copy, that doesn't call manage_fs or lock. If the destination exists, and is a file, it will be first truncated. This method exists to optimize copying in loops. In general you should prefer `copy_file`. Arguments: src_fs (FS): Source filesystem. src_path (str): Path to a file on the source filesystem. dst_fs (FS: Destination filesystem. dst_path (str): Path to a file on the destination filesystem.
def copy_file_internal(
    src_fs,  # type: FS
    src_path,  # type: Text
    dst_fs,  # type: FS
    dst_path,  # type: Text
):
    # type: (...) -> None
    """Low level copy, that doesn't call manage_fs or lock.

    If the destination exists, and is a file, it will be first truncated.

    This method exists to optimize copying in loops. In general you
    should prefer `copy_file`.

    Arguments:
        src_fs (FS): Source filesystem.
        src_path (str): Path to a file on the source filesystem.
        dst_fs (FS: Destination filesystem.
        dst_path (str): Path to a file on the destination filesystem.

    """
    if src_fs is dst_fs:
        # Same filesystem: delegate so the FS can use an optimized copy.
        src_fs.copy(src_path, dst_path, overwrite=True)
        return
    if dst_fs.hassyspath(dst_path):
        # Destination is a real OS file; write into the open handle.
        with dst_fs.openbin(dst_path, "w") as write_file:
            src_fs.download(src_path, write_file)
    else:
        # Generic path: stream from source and upload to destination.
        with src_fs.openbin(src_path) as read_file:
            dst_fs.upload(dst_path, read_file)
Copy a file from one filesystem to another, checking times. If the destination exists, and is a file, it will be first truncated. If both source and destination files exist, the copy is executed only if the source file is newer than the destination file. In case modification times of source or destination files are not available, copy is always executed. Arguments: src_fs (FS or str): Source filesystem (instance or URL). src_path (str): Path to a file on the source filesystem. dst_fs (FS or str): Destination filesystem (instance or URL). dst_path (str): Path to a file on the destination filesystem. Returns: bool: `True` if the file copy was executed, `False` otherwise.
def copy_file_if_newer(
    src_fs,  # type: Union[FS, Text]
    src_path,  # type: Text
    dst_fs,  # type: Union[FS, Text]
    dst_path,  # type: Text
):
    # type: (...) -> bool
    """Copy a file from one filesystem to another, checking times.

    If the destination exists, and is a file, it will be first truncated.

    If both source and destination files exist, the copy is executed
    only if the source file is newer than the destination file. In case
    modification times of source or destination files are not available,
    copy is always executed.

    Arguments:
        src_fs (FS or str): Source filesystem (instance or URL).
        src_path (str): Path to a file on the source filesystem.
        dst_fs (FS or str): Destination filesystem (instance or URL).
        dst_path (str): Path to a file on the destination filesystem.

    Returns:
        bool: `True` if the file copy was executed, `False` otherwise.

    """
    with manage_fs(src_fs, writeable=False) as _src_fs:
        with manage_fs(dst_fs, create=True) as _dst_fs:
            if _src_fs is _dst_fs:
                # Same filesystem, so we can do a potentially optimized
                # copy
                if _source_is_newer(_src_fs, src_path, _dst_fs, dst_path):
                    _src_fs.copy(src_path, dst_path, overwrite=True)
                    return True
                else:
                    return False
            else:
                # Standard copy; locks are only needed when the two
                # filesystems are distinct.
                with _src_fs.lock(), _dst_fs.lock():
                    if _source_is_newer(_src_fs, src_path, _dst_fs, dst_path):
                        copy_file_internal(_src_fs, src_path, _dst_fs, dst_path)
                        return True
                    else:
                        return False
Copy directories (but not files) from ``src_fs`` to ``dst_fs``. Arguments: src_fs (FS or str): Source filesystem (instance or URL). dst_fs (FS or str): Destination filesystem (instance or URL). walker (~fs.walk.Walker, optional): A walker object that will be used to scan for files in ``src_fs``. Set this if you only want to consider a sub-set of the resources in ``src_fs``.
def copy_structure(
    src_fs,  # type: Union[FS, Text]
    dst_fs,  # type: Union[FS, Text]
    walker=None,  # type: Optional[Walker]
):
    # type: (...) -> None
    """Copy directories (but not files) from ``src_fs`` to ``dst_fs``.

    Arguments:
        src_fs (FS or str): Source filesystem (instance or URL).
        dst_fs (FS or str): Destination filesystem (instance or URL).
        walker (~fs.walk.Walker, optional): A walker object that will be
            used to scan for files in ``src_fs``. Set this if you only
            want to consider a sub-set of the resources in ``src_fs``.

    """
    walker = walker or Walker()
    with manage_fs(src_fs) as _src_fs, manage_fs(dst_fs, create=True) as _dst_fs:
        with _src_fs.lock(), _dst_fs.lock():
            # Recreate every directory found by the walker; recreate=True
            # makes this idempotent for directories that already exist.
            for path in walker.dirs(_src_fs):
                _dst_fs.makedir(path, recreate=True)
Copy a directory from one filesystem to another. Arguments: src_fs (FS or str): Source filesystem (instance or URL). src_path (str): Path to a directory on the source filesystem. dst_fs (FS or str): Destination filesystem (instance or URL). dst_path (str): Path to a directory on the destination filesystem. walker (~fs.walk.Walker, optional): A walker object that will be used to scan for files in ``src_fs``. Set this if you only want to consider a sub-set of the resources in ``src_fs``. on_copy (callable, optional): A function callback called after a single file copy is executed. Expected signature is ``(src_fs, src_path, dst_fs, dst_path)``. workers (int): Use ``worker`` threads to copy data, or ``0`` (default) for a single-threaded copy.
def copy_dir(
    src_fs,  # type: Union[FS, Text]
    src_path,  # type: Text
    dst_fs,  # type: Union[FS, Text]
    dst_path,  # type: Text
    walker=None,  # type: Optional[Walker]
    on_copy=None,  # type: Optional[_OnCopy]
    workers=0,  # type: int
):
    # type: (...) -> None
    """Copy a directory from one filesystem to another.

    Arguments:
        src_fs (FS or str): Source filesystem (instance or URL).
        src_path (str): Path to a directory on the source filesystem.
        dst_fs (FS or str): Destination filesystem (instance or URL).
        dst_path (str): Path to a directory on the destination filesystem.
        walker (~fs.walk.Walker, optional): A walker object that will be
            used to scan for files in ``src_fs``. Set this if you only
            want to consider a sub-set of the resources in ``src_fs``.
        on_copy (callable, optional):  A function callback called after
            a single file copy is executed. Expected signature is
            ``(src_fs, src_path, dst_fs, dst_path)``.
        workers (int): Use ``worker`` threads to copy data, or ``0``
            (default) for a single-threaded copy.

    """
    on_copy = on_copy or (lambda *args: None)
    walker = walker or Walker()
    _src_path = abspath(normpath(src_path))
    _dst_path = abspath(normpath(dst_path))

    def src():
        # Context manager over the (possibly URL) source filesystem.
        return manage_fs(src_fs, writeable=False)

    def dst():
        # Context manager over the destination, created if necessary.
        return manage_fs(dst_fs, create=True)

    from ._bulk import Copier

    with src() as _src_fs, dst() as _dst_fs:
        with _src_fs.lock(), _dst_fs.lock():
            _thread_safe = is_thread_safe(_src_fs, _dst_fs)
            with Copier(num_workers=workers if _thread_safe else 0) as copier:
                _dst_fs.makedir(_dst_path, recreate=True)
                for dir_path, dirs, files in walker.walk(_src_fs, _src_path):
                    copy_path = combine(_dst_path, frombase(_src_path, dir_path))
                    for info in dirs:
                        _dst_fs.makedir(info.make_path(copy_path), recreate=True)
                    for info in files:
                        # Fresh names instead of rebinding the src_path /
                        # dst_path *parameters* (the original shadowed
                        # them inside this loop — confusing and fragile).
                        file_src_path = info.make_path(dir_path)
                        file_dst_path = info.make_path(copy_path)
                        copier.copy(_src_fs, file_src_path, _dst_fs, file_dst_path)
                        on_copy(_src_fs, file_src_path, _dst_fs, file_dst_path)
Copy a directory from one filesystem to another, checking times. If both source and destination files exist, the copy is executed only if the source file is newer than the destination file. In case modification times of source or destination files are not available, copy is always executed. Arguments: src_fs (FS or str): Source filesystem (instance or URL). src_path (str): Path to a directory on the source filesystem. dst_fs (FS or str): Destination filesystem (instance or URL). dst_path (str): Path to a directory on the destination filesystem. walker (~fs.walk.Walker, optional): A walker object that will be used to scan for files in ``src_fs``. Set this if you only want to consider a sub-set of the resources in ``src_fs``. on_copy (callable, optional): A function callback called after a single file copy is executed. Expected signature is ``(src_fs, src_path, dst_fs, dst_path)``. workers (int): Use ``worker`` threads to copy data, or ``0`` (default) for a single-threaded copy.
def copy_dir_if_newer(
    src_fs,  # type: Union[FS, Text]
    src_path,  # type: Text
    dst_fs,  # type: Union[FS, Text]
    dst_path,  # type: Text
    walker=None,  # type: Optional[Walker]
    on_copy=None,  # type: Optional[_OnCopy]
    workers=0,  # type: int
):
    # type: (...) -> None
    """Copy a directory from one filesystem to another, checking times.

    If both source and destination files exist, the copy is executed only
    if the source file is newer than the destination file. In case
    modification times of source or destination files are not available,
    copy is always executed.

    Arguments:
        src_fs (FS or str): Source filesystem (instance or URL).
        src_path (str): Path to a directory on the source filesystem.
        dst_fs (FS or str): Destination filesystem (instance or URL).
        dst_path (str): Path to a directory on the destination filesystem.
        walker (~fs.walk.Walker, optional): A walker object that will be
            used to scan for files in ``src_fs``. Set this if you only
            want to consider a sub-set of the resources in ``src_fs``.
        on_copy (callable, optional): A function callback called after
            a single file copy is executed. Expected signature is
            ``(src_fs, src_path, dst_fs, dst_path)``.
        workers (int): Use ``worker`` threads to copy data, or ``0``
            (default) for a single-threaded copy.

    """
    on_copy = on_copy or (lambda *args: None)
    walker = walker or Walker()
    _src_path = abspath(normpath(src_path))
    _dst_path = abspath(normpath(dst_path))

    def src():
        # Context manager over the (possibly URL) source filesystem.
        return manage_fs(src_fs, writeable=False)

    def dst():
        # Context manager over the destination, created if necessary.
        return manage_fs(dst_fs, create=True)

    from ._bulk import Copier

    with src() as _src_fs, dst() as _dst_fs:
        with _src_fs.lock(), _dst_fs.lock():
            _thread_safe = is_thread_safe(_src_fs, _dst_fs)
            with Copier(num_workers=workers if _thread_safe else 0) as copier:
                _dst_fs.makedir(_dst_path, recreate=True)
                namespace = ("details", "modified")
                # Snapshot of destination files, keyed by path, so each
                # source file needs only a dict lookup to find its rival.
                dst_state = {
                    path: info
                    for path, info in walker.info(_dst_fs, _dst_path, namespace)
                    if info.is_file
                }
                src_state = [
                    (path, info)
                    for path, info in walker.info(_src_fs, _src_path, namespace)
                ]
                # NOTE(review): despite its name, ``dir_path`` here holds
                # *any* walked path — files as well as directories.
                for dir_path, copy_info in src_state:
                    copy_path = combine(_dst_path, frombase(_src_path, dir_path))
                    if copy_info.is_dir:
                        _dst_fs.makedir(copy_path, recreate=True)
                    elif copy_info.is_file:
                        # dst file is present, try to figure out if copy
                        # is necessary
                        try:
                            src_modified = copy_info.modified
                            dst_modified = dst_state[dir_path].modified
                        except KeyError:
                            # No destination counterpart: always copy.
                            do_copy = True
                        else:
                            # Copy when either mtime is unknown, or the
                            # source is strictly newer.
                            do_copy = (
                                src_modified is None
                                or dst_modified is None
                                or src_modified > dst_modified
                            )
                        if do_copy:
                            copier.copy(_src_fs, dir_path, _dst_fs, copy_path)
                            on_copy(_src_fs, dir_path, _dst_fs, copy_path)
Extract code and message from ftp error.
def _parse_ftp_error(error):
    # type: (ftplib.Error) -> Tuple[Text, Text]
    """Extract code and message from ftp error."""
    # An FTP error string looks like "550 Permission denied"; split on
    # the first space.  partition() tolerates a missing space (the whole
    # text becomes the code and the message is empty).
    text = text_type(error)
    code, _, message = text.partition(" ")
    return code, message
Open an ftp object for the file.
def _open_ftp(self):
    # type: () -> FTP
    """Open an ftp object for the file."""
    session = self.fs._open_ftp()
    # Switch the session to binary (image) mode so file data is not
    # mangled by newline translation.
    session.voidcmd(str("TYPE I"))
    return session
Parse a dict of features from FTP feat response.
def _parse_features(cls, feat_response): # type: (Text) -> Dict[Text, Text] """Parse a dict of features from FTP feat response. """ features = {} if feat_response.split("-")[0] == "211": for line in feat_response.splitlines(): if line.startswith(" "): key, _, value = line[1:].partition(" ") features[key] = value return features
Open a new ftp object.
def _open_ftp(self):
    # type: () -> FTP
    """Open a new ftp object.

    """
    _ftp = FTP()
    _ftp.set_debuglevel(0)
    with ftp_errors(self):
        _ftp.connect(self.host, self.port, self.timeout)
        _ftp.login(self.user, self.passwd, self.acct)
        self._features = {}
        try:
            # Ask the server which extensions it supports (RFC 2389).
            feat_response = _decode(_ftp.sendcmd("FEAT"), "latin-1")
        except error_perm:  # pragma: no cover
            # Server rejected FEAT entirely; assume the conservative
            # legacy encoding.
            self.encoding = "latin-1"
        else:
            self._features = self._parse_features(feat_response)
            # Only trust UTF-8 filenames if the server advertises UTF8.
            self.encoding = "utf-8" if "UTF8" in self._features else "latin-1"
        if not PY2:
            # Rebuild the control-connection reader with the negotiated
            # encoding so directory listings decode correctly.
            _ftp.file = _ftp.sock.makefile(  # type: ignore
                "r", encoding=self.encoding
            )
    _ftp.encoding = self.encoding
    self._welcome = _ftp.welcome
    return _ftp
Get the FTP url this filesystem will open.
def ftp_url(self):
    # type: () -> Text
    """Get the FTP url this filesystem will open."""
    # Omit the port when it is the FTP default (21).
    if self.port == 21:
        return "ftp://{}".format(self.host)
    return "ftp://{}:{}".format(self.host, self.port)
Parse a time from an ftp directory listing.
def _parse_ftp_time(cls, time_text): # type: (Text) -> Optional[int] """Parse a time from an ftp directory listing. """ try: tm_year = int(time_text[0:4]) tm_month = int(time_text[4:6]) tm_day = int(time_text[6:8]) tm_hour = int(time_text[8:10]) tm_min = int(time_text[10:12]) tm_sec = int(time_text[12:14]) except ValueError: return None epoch_time = calendar.timegm( (tm_year, tm_month, tm_day, tm_hour, tm_min, tm_sec) ) return epoch_time
Write the contents of a filesystem to a zip file. Arguments: src_fs (~fs.base.FS): The source filesystem to compress. file (str or io.IOBase): Destination file, may be a file name or an open file object. compression (int): Compression to use (one of the constants defined in the `zipfile` module in the stdlib). Defaults to `zipfile.ZIP_DEFLATED`. encoding (str): The encoding to use for filenames. The default is ``"utf-8"``, use ``"CP437"`` if compatibility with WinZip is desired. walker (~fs.walk.Walker, optional): A `Walker` instance, or `None` to use default walker. You can use this to specify which files you want to compress.
def write_zip(
    src_fs,  # type: FS
    file,  # type: Union[Text, BinaryIO]
    compression=zipfile.ZIP_DEFLATED,  # type: int
    encoding="utf-8",  # type: Text
    walker=None,  # type: Optional[Walker]
):
    # type: (...) -> None
    """Write the contents of a filesystem to a zip file.

    Arguments:
        src_fs (~fs.base.FS): The source filesystem to compress.
        file (str or io.IOBase): Destination file, may be a file name
            or an open file object.
        compression (int): Compression to use (one of the constants
            defined in the `zipfile` module in the stdlib). Defaults
            to `zipfile.ZIP_DEFLATED`.
        encoding (str): The encoding to use for filenames. The default
            is ``"utf-8"``, use ``"CP437"`` if compatibility with WinZip
            is desired.
        walker (~fs.walk.Walker, optional): A `Walker` instance, or `None`
            to use default walker. You can use this to specify which files
            you want to compress.

    """
    _zip = zipfile.ZipFile(file, mode="w", compression=compression, allowZip64=True)
    walker = walker or Walker()
    with _zip:
        gen_walk = walker.info(src_fs, namespaces=["details", "stat", "access"])
        for path, info in gen_walk:
            # Zip names must be relative, directory names must end
            # with a slash.
            zip_name = relpath(path + "/" if info.is_dir else path)
            if not six.PY3:
                # Python2 expects bytes filenames
                zip_name = zip_name.encode(encoding, "replace")

            if info.has_namespace("stat"):
                # If the file has a stat namespace, get the
                # zip time directly from the stat structure
                st_mtime = info.get("stat", "st_mtime", None)
                _mtime = time.localtime(st_mtime)
                zip_time = _mtime[0:6]  # type: ZipTime
            else:
                # Otherwise, use the modified time from details
                # namespace.
                # NOTE(review): datetime.utcnow() yields a naive UTC
                # timestamp here while the stat branch uses localtime —
                # mixed time bases; confirm whether this is intentional.
                mt = info.modified or datetime.utcnow()
                zip_time = (mt.year, mt.month, mt.day, mt.hour, mt.minute, mt.second)

            # NOTE(@althonos): typeshed's `zipfile.py` only declares
            # ZipInfo.__init__ for Python < 3 ?!
            zip_info = zipfile.ZipInfo(zip_name, zip_time)  # type: ignore

            try:
                if info.permissions is not None:
                    # Unix permissions live in the high 16 bits of
                    # external_attr.
                    zip_info.external_attr = info.permissions.mode << 16
            except MissingInfoNamespace:
                pass

            if info.is_dir:
                # MS-DOS directory flag.
                zip_info.external_attr |= 0x10
                # This is how to record directories with zipfile
                _zip.writestr(zip_info, b"")
            else:
                # Get a syspath if possible
                try:
                    sys_path = src_fs.getsyspath(path)
                except NoSysPath:
                    # Write from bytes
                    _zip.writestr(zip_info, src_fs.readbytes(path))
                else:
                    # Write from a file which is (presumably)
                    # more memory efficient
                    _zip.write(sys_path, zip_name)
Write the contents of a filesystem to a tar file. Arguments: file (str or io.IOBase): Destination file, may be a file name or an open file object. compression (str, optional): Compression to use, or `None` for a plain Tar archive without compression. encoding(str): The encoding to use for filenames. The default is ``"utf-8"``. walker (~fs.walk.Walker, optional): A `Walker` instance, or `None` to use default walker. You can use this to specify which files you want to compress.
def write_tar(
    src_fs,  # type: FS
    file,  # type: Union[Text, BinaryIO]
    compression=None,  # type: Optional[Text]
    encoding="utf-8",  # type: Text
    walker=None,  # type: Optional[Walker]
):
    # type: (...) -> None
    """Write the contents of a filesystem to a tar file.

    Arguments:
        src_fs (~fs.base.FS): The source filesystem to compress.
        file (str or io.IOBase): Destination file, may be a file name
            or an open file object.
        compression (str, optional): Compression to use, or `None`
            for a plain Tar archive without compression.
        encoding (str): The encoding to use for filenames. The
            default is ``"utf-8"``.
        walker (~fs.walk.Walker, optional): A `Walker` instance, or `None`
            to use default walker. You can use this to specify which files
            you want to compress.

    """
    # Map fs resource types onto tarfile member type flags.
    type_map = {
        ResourceType.block_special_file: tarfile.BLKTYPE,
        ResourceType.character: tarfile.CHRTYPE,
        ResourceType.directory: tarfile.DIRTYPE,
        ResourceType.fifo: tarfile.FIFOTYPE,
        ResourceType.file: tarfile.REGTYPE,
        ResourceType.socket: tarfile.AREGTYPE,  # no type for socket
        ResourceType.symlink: tarfile.SYMTYPE,
        ResourceType.unknown: tarfile.AREGTYPE,  # no type for unknown
    }

    # (TarInfo attribute, fs Info attribute) pairs copied when available.
    tar_attr = [("uid", "uid"), ("gid", "gid"), ("uname", "user"), ("gname", "group")]

    # tarfile selects compression from the mode string, e.g. "w:gz".
    mode = "w:{}".format(compression or "")
    if isinstance(file, (six.text_type, six.binary_type)):
        _tar = tarfile.open(file, mode=mode)
    else:
        _tar = tarfile.open(fileobj=file, mode=mode)

    current_time = time.time()
    walker = walker or Walker()
    with _tar:
        gen_walk = walker.info(src_fs, namespaces=["details", "stat", "access"])
        for path, info in gen_walk:
            # Tar names must be relative
            tar_name = relpath(path)
            if not six.PY3:
                # Python2 expects bytes filenames
                tar_name = tar_name.encode(encoding, "replace")

            tar_info = tarfile.TarInfo(tar_name)

            # Prefer the raw stat mtime, fall back to the details
            # namespace, then to the archive creation time.
            if info.has_namespace("stat"):
                mtime = info.get("stat", "st_mtime", current_time)
            else:
                mtime = info.modified or current_time

            # TarInfo.mtime must be an integer epoch timestamp.
            if isinstance(mtime, datetime):
                mtime = datetime_to_epoch(mtime)
            if isinstance(mtime, float):
                mtime = int(mtime)
            tar_info.mtime = mtime

            for tarattr, infoattr in tar_attr:
                if getattr(info, infoattr, None) is not None:
                    setattr(tar_info, tarattr, getattr(info, infoattr, None))

            if info.has_namespace("access"):
                # NOTE(review): the fallback 0o420 (r---w----) looks odd —
                # possibly decimal 420 (== 0o644) was intended; confirm.
                tar_info.mode = getattr(info.permissions, "mode", 0o420)

            if info.is_dir:
                tar_info.type = tarfile.DIRTYPE
                _tar.addfile(tar_info)
            else:
                tar_info.type = type_map.get(info.type, tarfile.REGTYPE)
                tar_info.size = info.size
                # Stream file contents straight into the archive.
                with src_fs.openbin(path) as bin_file:
                    _tar.addfile(tar_info, bin_file)
Compare a glob pattern with a path (case sensitive). Arguments: pattern (str): A glob pattern. path (str): A path. Returns: bool: ``True`` if the path matches the pattern. Example: >>> from fs.glob import match >>> match("**/*.py", "/fs/glob.py") True
def match(pattern, path):
    # type: (str, str) -> bool
    """Compare a glob pattern with a path (case sensitive).

    Arguments:
        pattern (str): A glob pattern.
        path (str): A path.

    Returns:
        bool: ``True`` if the path matches the pattern.

    Example:
        >>> from fs.glob import match
        >>> match("**/*.py", "/fs/glob.py")
        True

    """
    cache_key = (pattern, True)
    try:
        # Fast path: this pattern was translated before.
        _, _, regex = _PATTERN_CACHE[cache_key]
    except KeyError:
        num_levels, is_recursive, regex = _translate_glob(pattern, case_sensitive=True)
        _PATTERN_CACHE[cache_key] = (num_levels, is_recursive, regex)
    return regex.match(path) is not None
Count files / directories / data in matched paths. Example: >>> import fs >>> fs.open_fs('~/projects').glob('**/*.py').count() Counts(files=18519, directories=0, data=206690458) Returns: `~Counts`: A named tuple containing results.
def count(self):
    # type: () -> Counts
    """Count files / directories / data in matched paths.

    Example:
        >>> import fs
        >>> fs.open_fs('~/projects').glob('**/*.py').count()
        Counts(files=18519, directories=0, data=206690458)

    Returns:
        `~Counts`: A named tuple containing results.

    """
    n_dirs = n_files = total_bytes = 0
    for _path, info in self._make_iter(namespaces=["details"]):
        if info.is_dir:
            n_dirs += 1
        else:
            n_files += 1
            # Only files contribute to the data total.
            total_bytes += info.size
    return Counts(directories=n_dirs, files=n_files, data=total_bytes)
Count the lines in the matched files. Returns: `~LineCounts`: A named tuple containing line counts. Example: >>> import fs >>> fs.open_fs('~/projects').glob('**/*.py').count_lines() LineCounts(lines=5767102, non_blank=4915110)
def count_lines(self):
    # type: () -> LineCounts
    """Count the lines in the matched files.

    Returns:
        `~LineCounts`: A named tuple containing line counts.

    Example:
        >>> import fs
        >>> fs.open_fs('~/projects').glob('**/*.py').count_lines()
        LineCounts(lines=5767102, non_blank=4915110)

    """
    lines = 0
    non_blank = 0
    for path, info in self._make_iter():
        if info.is_file:
            # Open in binary mode so lines are counted without decoding.
            # Use a context manager so each file handle is closed promptly
            # (previously the handles leaked until garbage collection).
            with self.fs.open(path, "rb") as handle:
                for line in handle:
                    lines += 1
                    # A line that is only whitespace/newline is "blank".
                    if line.rstrip():
                        non_blank += 1
    return LineCounts(lines=lines, non_blank=non_blank)
Remove all matched paths. Returns: int: Number of files and directories removed. Example: >>> import fs >>> fs.open_fs('~/projects/my_project').glob('**/*.pyc').remove() 29
def remove(self):
    # type: () -> int
    """Remove all matched paths.

    Returns:
        int: Number of files and directories removed.

    Example:
        >>> import fs
        >>> fs.open_fs('~/projects/my_project').glob('**/*.pyc').remove()
        29

    """
    removed = 0
    # Depth-first traversal, so contents are visited before their
    # parent directories.
    for path, info in self._make_iter(search="depth"):
        delete = self.fs.removetree if info.is_dir else self.fs.remove
        delete(path)
        removed += 1
    return removed
Move a file from one filesystem to another. Arguments: src_fs (FS or str): Source filesystem (instance or URL). src_path (str): Path to a file on ``src_fs``. dst_fs (FS or str): Destination filesystem (instance or URL). dst_path (str): Path to a file on ``dst_fs``.
def move_file(
    src_fs,  # type: Union[Text, FS]
    src_path,  # type: Text
    dst_fs,  # type: Union[Text, FS]
    dst_path,  # type: Text
):
    # type: (...) -> None
    """Move a file from one filesystem to another.

    Arguments:
        src_fs (FS or str): Source filesystem (instance or URL).
        src_path (str): Path to a file on ``src_fs``.
        dst_fs (FS or str): Destination filesystem (instance or URL).
        dst_path (str): Path to a file on ``dst_fs``.

    """
    with manage_fs(src_fs) as _src_fs:
        with manage_fs(dst_fs, create=True) as _dst_fs:
            if _src_fs is _dst_fs:
                # Same filesystem object: let it optimize the move.
                _src_fs.move(src_path, dst_path, overwrite=True)
                return
            # Different filesystems: copy the data, then delete the
            # source, holding both locks for the duration.
            with _src_fs.lock(), _dst_fs.lock():
                copy_file(_src_fs, src_path, _dst_fs, dst_path)
                _src_fs.remove(src_path)
Move a directory from one filesystem to another. Arguments: src_fs (FS or str): Source filesystem (instance or URL). src_path (str): Path to a directory on ``src_fs`` dst_fs (FS or str): Destination filesystem (instance or URL). dst_path (str): Path to a directory on ``dst_fs``. workers (int): Use `worker` threads to copy data, or ``0`` (default) for a single-threaded copy.
def move_dir(
    src_fs,  # type: Union[Text, FS]
    src_path,  # type: Text
    dst_fs,  # type: Union[Text, FS]
    dst_path,  # type: Text
    workers=0,  # type: int
):
    # type: (...) -> None
    """Move a directory from one filesystem to another.

    Arguments:
        src_fs (FS or str): Source filesystem (instance or URL).
        src_path (str): Path to a directory on ``src_fs``.
        dst_fs (FS or str): Destination filesystem (instance or URL).
        dst_path (str): Path to a directory on ``dst_fs``.
        workers (int): Use ``worker`` threads to copy data, or ``0``
            (default) for a single-threaded copy.

    """

    def src():
        return manage_fs(src_fs, writeable=False)

    def dst():
        return manage_fs(dst_fs, create=True)

    with src() as _src_fs, dst() as _dst_fs:
        with _src_fs.lock(), _dst_fs.lock():
            _dst_fs.makedir(dst_path, recreate=True)
            # Pass the already-opened filesystems rather than the original
            # arguments: if src_fs / dst_fs were FS URLs, re-passing them
            # would make copy_dir open each filesystem a second time.
            # (manage_fs on an open FS returns it unchanged, so this is
            # behavior-compatible for FS-instance callers.)
            copy_dir(_src_fs, src_path, _dst_fs, dst_path, workers=workers)
            _src_fs.removetree(src_path)