body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
bd85efa671a1f14b6e1c453ef648cfe4eba62192fd507fa4157d11d6124d818d
def set_low_power(self): '!\n @brief disabled the the magn, gyro sensor to reduce power consumption\n ' self.soft_reset() time.sleep(0.1) self.set_magn_conf() time.sleep(0.1) self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, 18) time.sleep(0.1) self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, 23) time.sleep(0.1) self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, 27) time.sleep(0.1)
! @brief disabled the the magn, gyro sensor to reduce power consumption
python/raspberrypi/DFRobot_BMX160.py
set_low_power
DFRobot/DFRobot_BMX160
9
python
def set_low_power(self): '!\n @brief disabled the the magn, gyro sensor to reduce power consumption\n ' self.soft_reset() time.sleep(0.1) self.set_magn_conf() time.sleep(0.1) self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, 18) time.sleep(0.1) self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, 23) time.sleep(0.1) self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, 27) time.sleep(0.1)
def set_low_power(self): '!\n @brief disabled the the magn, gyro sensor to reduce power consumption\n ' self.soft_reset() time.sleep(0.1) self.set_magn_conf() time.sleep(0.1) self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, 18) time.sleep(0.1) self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, 23) time.sleep(0.1) self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, 27) time.sleep(0.1)<|docstring|>! @brief disabled the the magn, gyro sensor to reduce power consumption<|endoftext|>
e9a55d8ddf3c30a568518fe712019ba488d74966d87afb4f73ff7f4084c38ece
def wake_up(self): '!\n @brief enabled the the magn, gyro sensor\n ' self.soft_reset() time.sleep(0.1) self.set_magn_conf() time.sleep(0.1) self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, 17) time.sleep(0.1) self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, 21) time.sleep(0.1) self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, 25) time.sleep(0.1)
! @brief enabled the the magn, gyro sensor
python/raspberrypi/DFRobot_BMX160.py
wake_up
DFRobot/DFRobot_BMX160
9
python
def wake_up(self): '!\n @brief enabled the the magn, gyro sensor\n ' self.soft_reset() time.sleep(0.1) self.set_magn_conf() time.sleep(0.1) self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, 17) time.sleep(0.1) self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, 21) time.sleep(0.1) self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, 25) time.sleep(0.1)
def wake_up(self): '!\n @brief enabled the the magn, gyro sensor\n ' self.soft_reset() time.sleep(0.1) self.set_magn_conf() time.sleep(0.1) self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, 17) time.sleep(0.1) self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, 21) time.sleep(0.1) self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, 25) time.sleep(0.1)<|docstring|>! @brief enabled the the magn, gyro sensor<|endoftext|>
4370be6036e6b98f244359e5638885a0e04c72aa2ee7bf6bc95599ffceb36dbc
def soft_reset(self): '!\n @brief reset bmx160 hardware\n @return returns the reset status\n @retval True reset succeeded\n @retval False reset failed\n ' data = self.BMX160_SOFT_RESET_CMD self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, data) time.sleep(0.015) return True
! @brief reset bmx160 hardware @return returns the reset status @retval True reset succeeded @retval False reset failed
python/raspberrypi/DFRobot_BMX160.py
soft_reset
DFRobot/DFRobot_BMX160
9
python
def soft_reset(self): '!\n @brief reset bmx160 hardware\n @return returns the reset status\n @retval True reset succeeded\n @retval False reset failed\n ' data = self.BMX160_SOFT_RESET_CMD self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, data) time.sleep(0.015) return True
def soft_reset(self): '!\n @brief reset bmx160 hardware\n @return returns the reset status\n @retval True reset succeeded\n @retval False reset failed\n ' data = self.BMX160_SOFT_RESET_CMD self.write_bmx_reg(self._BMX160_COMMAND_REG_ADDR, data) time.sleep(0.015) return True<|docstring|>! @brief reset bmx160 hardware @return returns the reset status @retval True reset succeeded @retval False reset failed<|endoftext|>
204194f8733df7d26713e66feff52020961af595ab5d75fdffba21d308c534a5
def set_magn_conf(self): '!\n @brief set magnetometer Config\n ' self.write_bmx_reg(self._BMX160_MAGN_IF_0_ADDR, 128) time.sleep(0.05) self.write_bmx_reg(self._BMX160_MAGN_IF_3_ADDR, 1) self.write_bmx_reg(self._BMX160_MAGN_IF_2_ADDR, 75) self.write_bmx_reg(self._BMX160_MAGN_IF_3_ADDR, 4) self.write_bmx_reg(self._BMX160_MAGN_IF_2_ADDR, 81) self.write_bmx_reg(self._BMX160_MAGN_IF_3_ADDR, 14) self.write_bmx_reg(self._BMX160_MAGN_IF_2_ADDR, 82) self.write_bmx_reg(self._BMX160_MAGN_IF_3_ADDR, 2) self.write_bmx_reg(self._BMX160_MAGN_IF_2_ADDR, 76) self.write_bmx_reg(self._BMX160_MAGN_IF_1_ADDR, 66) self.write_bmx_reg(self._BMX160_MAGN_CONFIG_ADDR, 8) self.write_bmx_reg(self._BMX160_MAGN_IF_0_ADDR, 3) time.sleep(0.05)
! @brief set magnetometer Config
python/raspberrypi/DFRobot_BMX160.py
set_magn_conf
DFRobot/DFRobot_BMX160
9
python
def set_magn_conf(self): '!\n @brief set magnetometer Config\n ' self.write_bmx_reg(self._BMX160_MAGN_IF_0_ADDR, 128) time.sleep(0.05) self.write_bmx_reg(self._BMX160_MAGN_IF_3_ADDR, 1) self.write_bmx_reg(self._BMX160_MAGN_IF_2_ADDR, 75) self.write_bmx_reg(self._BMX160_MAGN_IF_3_ADDR, 4) self.write_bmx_reg(self._BMX160_MAGN_IF_2_ADDR, 81) self.write_bmx_reg(self._BMX160_MAGN_IF_3_ADDR, 14) self.write_bmx_reg(self._BMX160_MAGN_IF_2_ADDR, 82) self.write_bmx_reg(self._BMX160_MAGN_IF_3_ADDR, 2) self.write_bmx_reg(self._BMX160_MAGN_IF_2_ADDR, 76) self.write_bmx_reg(self._BMX160_MAGN_IF_1_ADDR, 66) self.write_bmx_reg(self._BMX160_MAGN_CONFIG_ADDR, 8) self.write_bmx_reg(self._BMX160_MAGN_IF_0_ADDR, 3) time.sleep(0.05)
def set_magn_conf(self): '!\n @brief set magnetometer Config\n ' self.write_bmx_reg(self._BMX160_MAGN_IF_0_ADDR, 128) time.sleep(0.05) self.write_bmx_reg(self._BMX160_MAGN_IF_3_ADDR, 1) self.write_bmx_reg(self._BMX160_MAGN_IF_2_ADDR, 75) self.write_bmx_reg(self._BMX160_MAGN_IF_3_ADDR, 4) self.write_bmx_reg(self._BMX160_MAGN_IF_2_ADDR, 81) self.write_bmx_reg(self._BMX160_MAGN_IF_3_ADDR, 14) self.write_bmx_reg(self._BMX160_MAGN_IF_2_ADDR, 82) self.write_bmx_reg(self._BMX160_MAGN_IF_3_ADDR, 2) self.write_bmx_reg(self._BMX160_MAGN_IF_2_ADDR, 76) self.write_bmx_reg(self._BMX160_MAGN_IF_1_ADDR, 66) self.write_bmx_reg(self._BMX160_MAGN_CONFIG_ADDR, 8) self.write_bmx_reg(self._BMX160_MAGN_IF_0_ADDR, 3) time.sleep(0.05)<|docstring|>! @brief set magnetometer Config<|endoftext|>
d3348b78aec9da4c50c63813330647d8e1fc3ded53202e80b3eb24dedb701f2d
def set_gyro_range(self, bits): '!\n @brief set gyroscope angular rate range and resolution.\n @param bits \n @n GyroRange_125DPS Gyroscope sensitivity at 125dps\n @n GyroRange_250DPS Gyroscope sensitivity at 250dps\n @n GyroRange_500DPS Gyroscope sensitivity at 500dps\n @n GyroRange_1000DPS Gyroscope sensitivity at 1000dps\n @n GyroRange_2000DPS Gyroscope sensitivity at 2000dps\n ' if (bits == 0): self.gyroRange = self._BMX160_GYRO_SENSITIVITY_125DPS elif (bits == 1): self.gyroRange = self._BMX160_GYRO_SENSITIVITY_250DPS elif (bits == 2): self.gyroRange = self._BMX160_GYRO_SENSITIVITY_500DPS elif (bits == 3): self.gyroRange = self._BMX160_GYRO_SENSITIVITY_1000DPS elif (bits == 4): self.gyroRange = self._BMX160_GYRO_SENSITIVITY_2000DPS else: self.gyroRange = self._BMX160_GYRO_SENSITIVITY_250DPS
! @brief set gyroscope angular rate range and resolution. @param bits @n GyroRange_125DPS Gyroscope sensitivity at 125dps @n GyroRange_250DPS Gyroscope sensitivity at 250dps @n GyroRange_500DPS Gyroscope sensitivity at 500dps @n GyroRange_1000DPS Gyroscope sensitivity at 1000dps @n GyroRange_2000DPS Gyroscope sensitivity at 2000dps
python/raspberrypi/DFRobot_BMX160.py
set_gyro_range
DFRobot/DFRobot_BMX160
9
python
def set_gyro_range(self, bits): '!\n @brief set gyroscope angular rate range and resolution.\n @param bits \n @n GyroRange_125DPS Gyroscope sensitivity at 125dps\n @n GyroRange_250DPS Gyroscope sensitivity at 250dps\n @n GyroRange_500DPS Gyroscope sensitivity at 500dps\n @n GyroRange_1000DPS Gyroscope sensitivity at 1000dps\n @n GyroRange_2000DPS Gyroscope sensitivity at 2000dps\n ' if (bits == 0): self.gyroRange = self._BMX160_GYRO_SENSITIVITY_125DPS elif (bits == 1): self.gyroRange = self._BMX160_GYRO_SENSITIVITY_250DPS elif (bits == 2): self.gyroRange = self._BMX160_GYRO_SENSITIVITY_500DPS elif (bits == 3): self.gyroRange = self._BMX160_GYRO_SENSITIVITY_1000DPS elif (bits == 4): self.gyroRange = self._BMX160_GYRO_SENSITIVITY_2000DPS else: self.gyroRange = self._BMX160_GYRO_SENSITIVITY_250DPS
def set_gyro_range(self, bits): '!\n @brief set gyroscope angular rate range and resolution.\n @param bits \n @n GyroRange_125DPS Gyroscope sensitivity at 125dps\n @n GyroRange_250DPS Gyroscope sensitivity at 250dps\n @n GyroRange_500DPS Gyroscope sensitivity at 500dps\n @n GyroRange_1000DPS Gyroscope sensitivity at 1000dps\n @n GyroRange_2000DPS Gyroscope sensitivity at 2000dps\n ' if (bits == 0): self.gyroRange = self._BMX160_GYRO_SENSITIVITY_125DPS elif (bits == 1): self.gyroRange = self._BMX160_GYRO_SENSITIVITY_250DPS elif (bits == 2): self.gyroRange = self._BMX160_GYRO_SENSITIVITY_500DPS elif (bits == 3): self.gyroRange = self._BMX160_GYRO_SENSITIVITY_1000DPS elif (bits == 4): self.gyroRange = self._BMX160_GYRO_SENSITIVITY_2000DPS else: self.gyroRange = self._BMX160_GYRO_SENSITIVITY_250DPS<|docstring|>! @brief set gyroscope angular rate range and resolution. @param bits @n GyroRange_125DPS Gyroscope sensitivity at 125dps @n GyroRange_250DPS Gyroscope sensitivity at 250dps @n GyroRange_500DPS Gyroscope sensitivity at 500dps @n GyroRange_1000DPS Gyroscope sensitivity at 1000dps @n GyroRange_2000DPS Gyroscope sensitivity at 2000dps<|endoftext|>
081d13dd78d25d69bb975f61f6045f4ddd3019b439841565999c73e10fa0d1cf
def set_accel_range(self, bits): '!\n @brief allow the selection of the accelerometer g-range.\n @param bits \n @n AccelRange_2G Macro for mg per LSB at +/- 2g sensitivity (1 LSB = 0.000061035mg) \n @n AccelRange_4G Macro for mg per LSB at +/- 4g sensitivity (1 LSB = 0.000122070mg) \n @n AccelRange_8G Macro for mg per LSB at +/- 8g sensitivity (1 LSB = 0.000244141mg) \n @n AccelRange_16G Macro for mg per LSB at +/- 16g sensitivity (1 LSB = 0.000488281mg)\n ' if (bits == 0): self.accelRange = self._BMX160_ACCEL_MG_LSB_2G elif (bits == 1): self.accelRange = self._BMX160_ACCEL_MG_LSB_4G elif (bits == 2): self.accelRange = self._BMX160_ACCEL_MG_LSB_8G elif (bits == 3): self.accelRange = self._BMX160_ACCEL_MG_LSB_16G else: self.accelRange = self._BMX160_ACCEL_MG_LSB_2G
! @brief allow the selection of the accelerometer g-range. @param bits @n AccelRange_2G Macro for mg per LSB at +/- 2g sensitivity (1 LSB = 0.000061035mg) @n AccelRange_4G Macro for mg per LSB at +/- 4g sensitivity (1 LSB = 0.000122070mg) @n AccelRange_8G Macro for mg per LSB at +/- 8g sensitivity (1 LSB = 0.000244141mg) @n AccelRange_16G Macro for mg per LSB at +/- 16g sensitivity (1 LSB = 0.000488281mg)
python/raspberrypi/DFRobot_BMX160.py
set_accel_range
DFRobot/DFRobot_BMX160
9
python
def set_accel_range(self, bits): '!\n @brief allow the selection of the accelerometer g-range.\n @param bits \n @n AccelRange_2G Macro for mg per LSB at +/- 2g sensitivity (1 LSB = 0.000061035mg) \n @n AccelRange_4G Macro for mg per LSB at +/- 4g sensitivity (1 LSB = 0.000122070mg) \n @n AccelRange_8G Macro for mg per LSB at +/- 8g sensitivity (1 LSB = 0.000244141mg) \n @n AccelRange_16G Macro for mg per LSB at +/- 16g sensitivity (1 LSB = 0.000488281mg)\n ' if (bits == 0): self.accelRange = self._BMX160_ACCEL_MG_LSB_2G elif (bits == 1): self.accelRange = self._BMX160_ACCEL_MG_LSB_4G elif (bits == 2): self.accelRange = self._BMX160_ACCEL_MG_LSB_8G elif (bits == 3): self.accelRange = self._BMX160_ACCEL_MG_LSB_16G else: self.accelRange = self._BMX160_ACCEL_MG_LSB_2G
def set_accel_range(self, bits): '!\n @brief allow the selection of the accelerometer g-range.\n @param bits \n @n AccelRange_2G Macro for mg per LSB at +/- 2g sensitivity (1 LSB = 0.000061035mg) \n @n AccelRange_4G Macro for mg per LSB at +/- 4g sensitivity (1 LSB = 0.000122070mg) \n @n AccelRange_8G Macro for mg per LSB at +/- 8g sensitivity (1 LSB = 0.000244141mg) \n @n AccelRange_16G Macro for mg per LSB at +/- 16g sensitivity (1 LSB = 0.000488281mg)\n ' if (bits == 0): self.accelRange = self._BMX160_ACCEL_MG_LSB_2G elif (bits == 1): self.accelRange = self._BMX160_ACCEL_MG_LSB_4G elif (bits == 2): self.accelRange = self._BMX160_ACCEL_MG_LSB_8G elif (bits == 3): self.accelRange = self._BMX160_ACCEL_MG_LSB_16G else: self.accelRange = self._BMX160_ACCEL_MG_LSB_2G<|docstring|>! @brief allow the selection of the accelerometer g-range. @param bits @n AccelRange_2G Macro for mg per LSB at +/- 2g sensitivity (1 LSB = 0.000061035mg) @n AccelRange_4G Macro for mg per LSB at +/- 4g sensitivity (1 LSB = 0.000122070mg) @n AccelRange_8G Macro for mg per LSB at +/- 8g sensitivity (1 LSB = 0.000244141mg) @n AccelRange_16G Macro for mg per LSB at +/- 16g sensitivity (1 LSB = 0.000488281mg)<|endoftext|>
3910a6b9ad282f8ecd015359f9e4bf2f201089230fc4bbec75c3b7c291b235e7
def get_all_data(self): '!\n @brief get the magn, gyro and accel data \n @return all data\n ' data = self.read_bmx_reg(self._BMX160_MAG_DATA_ADDR) if (data[1] & 128): magnx = ((- 65536) + ((data[1] << 8) | data[0])) else: magnx = ((data[1] << 8) | data[0]) if (data[3] & 128): magny = ((- 65536) + ((data[3] << 8) | data[2])) else: magny = ((data[3] << 8) | data[2]) if (data[5] & 128): magnz = ((- 65536) + ((data[5] << 8) | data[4])) else: magnz = ((data[5] << 8) | data[4]) if (data[9] & 128): gyrox = ((- 65536) + ((data[9] << 8) | data[8])) else: gyrox = ((data[9] << 8) | data[8]) if (data[11] & 128): gyroy = ((- 65536) + ((data[11] << 8) | data[10])) else: gyroy = ((data[11] << 8) | data[10]) if (data[13] & 128): gyroz = ((- 65536) + ((data[13] << 8) | data[12])) else: gyroz = ((data[13] << 8) | data[12]) if (data[15] & 128): accelx = ((- 65536) + ((data[15] << 8) | data[14])) else: accelx = ((data[15] << 8) | data[14]) if (data[17] & 128): accely = ((- 65536) + ((data[17] << 8) | data[16])) else: accely = ((data[17] << 8) | data[16]) if (data[19] & 128): accelz = ((- 65536) + ((data[19] << 8) | data[18])) else: accelz = ((data[19] << 8) | data[18]) magnx *= self.BMX160_MAGN_UT_LSB magny *= self.BMX160_MAGN_UT_LSB magnz *= self.BMX160_MAGN_UT_LSB gyrox *= self.gyroRange gyroy *= self.gyroRange gyroz *= self.gyroRange accelx *= (self.accelRange * 9.8) accely *= (self.accelRange * 9.8) accelz *= (self.accelRange * 9.8) out_put = [] out_put.append(magnx) out_put.append(magny) out_put.append(magnz) out_put.append(gyrox) out_put.append(gyroy) out_put.append(gyroz) out_put.append(accelx) out_put.append(accely) out_put.append(accelz) return out_put
! @brief get the magn, gyro and accel data @return all data
python/raspberrypi/DFRobot_BMX160.py
get_all_data
DFRobot/DFRobot_BMX160
9
python
def get_all_data(self): '!\n @brief get the magn, gyro and accel data \n @return all data\n ' data = self.read_bmx_reg(self._BMX160_MAG_DATA_ADDR) if (data[1] & 128): magnx = ((- 65536) + ((data[1] << 8) | data[0])) else: magnx = ((data[1] << 8) | data[0]) if (data[3] & 128): magny = ((- 65536) + ((data[3] << 8) | data[2])) else: magny = ((data[3] << 8) | data[2]) if (data[5] & 128): magnz = ((- 65536) + ((data[5] << 8) | data[4])) else: magnz = ((data[5] << 8) | data[4]) if (data[9] & 128): gyrox = ((- 65536) + ((data[9] << 8) | data[8])) else: gyrox = ((data[9] << 8) | data[8]) if (data[11] & 128): gyroy = ((- 65536) + ((data[11] << 8) | data[10])) else: gyroy = ((data[11] << 8) | data[10]) if (data[13] & 128): gyroz = ((- 65536) + ((data[13] << 8) | data[12])) else: gyroz = ((data[13] << 8) | data[12]) if (data[15] & 128): accelx = ((- 65536) + ((data[15] << 8) | data[14])) else: accelx = ((data[15] << 8) | data[14]) if (data[17] & 128): accely = ((- 65536) + ((data[17] << 8) | data[16])) else: accely = ((data[17] << 8) | data[16]) if (data[19] & 128): accelz = ((- 65536) + ((data[19] << 8) | data[18])) else: accelz = ((data[19] << 8) | data[18]) magnx *= self.BMX160_MAGN_UT_LSB magny *= self.BMX160_MAGN_UT_LSB magnz *= self.BMX160_MAGN_UT_LSB gyrox *= self.gyroRange gyroy *= self.gyroRange gyroz *= self.gyroRange accelx *= (self.accelRange * 9.8) accely *= (self.accelRange * 9.8) accelz *= (self.accelRange * 9.8) out_put = [] out_put.append(magnx) out_put.append(magny) out_put.append(magnz) out_put.append(gyrox) out_put.append(gyroy) out_put.append(gyroz) out_put.append(accelx) out_put.append(accely) out_put.append(accelz) return out_put
def get_all_data(self): '!\n @brief get the magn, gyro and accel data \n @return all data\n ' data = self.read_bmx_reg(self._BMX160_MAG_DATA_ADDR) if (data[1] & 128): magnx = ((- 65536) + ((data[1] << 8) | data[0])) else: magnx = ((data[1] << 8) | data[0]) if (data[3] & 128): magny = ((- 65536) + ((data[3] << 8) | data[2])) else: magny = ((data[3] << 8) | data[2]) if (data[5] & 128): magnz = ((- 65536) + ((data[5] << 8) | data[4])) else: magnz = ((data[5] << 8) | data[4]) if (data[9] & 128): gyrox = ((- 65536) + ((data[9] << 8) | data[8])) else: gyrox = ((data[9] << 8) | data[8]) if (data[11] & 128): gyroy = ((- 65536) + ((data[11] << 8) | data[10])) else: gyroy = ((data[11] << 8) | data[10]) if (data[13] & 128): gyroz = ((- 65536) + ((data[13] << 8) | data[12])) else: gyroz = ((data[13] << 8) | data[12]) if (data[15] & 128): accelx = ((- 65536) + ((data[15] << 8) | data[14])) else: accelx = ((data[15] << 8) | data[14]) if (data[17] & 128): accely = ((- 65536) + ((data[17] << 8) | data[16])) else: accely = ((data[17] << 8) | data[16]) if (data[19] & 128): accelz = ((- 65536) + ((data[19] << 8) | data[18])) else: accelz = ((data[19] << 8) | data[18]) magnx *= self.BMX160_MAGN_UT_LSB magny *= self.BMX160_MAGN_UT_LSB magnz *= self.BMX160_MAGN_UT_LSB gyrox *= self.gyroRange gyroy *= self.gyroRange gyroz *= self.gyroRange accelx *= (self.accelRange * 9.8) accely *= (self.accelRange * 9.8) accelz *= (self.accelRange * 9.8) out_put = [] out_put.append(magnx) out_put.append(magny) out_put.append(magnz) out_put.append(gyrox) out_put.append(gyroy) out_put.append(gyroz) out_put.append(accelx) out_put.append(accely) out_put.append(accelz) return out_put<|docstring|>! @brief get the magn, gyro and accel data @return all data<|endoftext|>
571f5dd29341cf4b9ce5b73f3e580b408e5b724994f458b6279f8a03412245bf
def write_bmx_reg(self, register, value): '!\n @brief Write data to the BMX register\n @param register register\n @param value Data written to the BMX register\n @return return the actually written length\n ' self.i2cbus.write_byte_data(self.i2c_addr, register, value)
! @brief Write data to the BMX register @param register register @param value Data written to the BMX register @return return the actually written length
python/raspberrypi/DFRobot_BMX160.py
write_bmx_reg
DFRobot/DFRobot_BMX160
9
python
def write_bmx_reg(self, register, value): '!\n @brief Write data to the BMX register\n @param register register\n @param value Data written to the BMX register\n @return return the actually written length\n ' self.i2cbus.write_byte_data(self.i2c_addr, register, value)
def write_bmx_reg(self, register, value): '!\n @brief Write data to the BMX register\n @param register register\n @param value Data written to the BMX register\n @return return the actually written length\n ' self.i2cbus.write_byte_data(self.i2c_addr, register, value)<|docstring|>! @brief Write data to the BMX register @param register register @param value Data written to the BMX register @return return the actually written length<|endoftext|>
d3ab7eec1d2d30c80076718c421eb87e908f101623c255b48389821461038fad
def read_bmx_reg(self, register): '!\n @brief Read BMX register data\n @param register register\n @return data\n ' return self.i2cbus.read_i2c_block_data(self.i2c_addr, register)
! @brief Read BMX register data @param register register @return data
python/raspberrypi/DFRobot_BMX160.py
read_bmx_reg
DFRobot/DFRobot_BMX160
9
python
def read_bmx_reg(self, register): '!\n @brief Read BMX register data\n @param register register\n @return data\n ' return self.i2cbus.read_i2c_block_data(self.i2c_addr, register)
def read_bmx_reg(self, register): '!\n @brief Read BMX register data\n @param register register\n @return data\n ' return self.i2cbus.read_i2c_block_data(self.i2c_addr, register)<|docstring|>! @brief Read BMX register data @param register register @return data<|endoftext|>
04cd0f84ff13d5e2863fd550354c70816512789354903c49504592d820ffa73f
def scan(self): '!\n @brief iic scan function\n @return scan result\n @retval True sensor exist\n @retval False There is no sensor\n ' try: self.i2cbus.read_byte(self.i2c_addr) return True except: print('I2C init fail') return False
! @brief iic scan function @return scan result @retval True sensor exist @retval False There is no sensor
python/raspberrypi/DFRobot_BMX160.py
scan
DFRobot/DFRobot_BMX160
9
python
def scan(self): '!\n @brief iic scan function\n @return scan result\n @retval True sensor exist\n @retval False There is no sensor\n ' try: self.i2cbus.read_byte(self.i2c_addr) return True except: print('I2C init fail') return False
def scan(self): '!\n @brief iic scan function\n @return scan result\n @retval True sensor exist\n @retval False There is no sensor\n ' try: self.i2cbus.read_byte(self.i2c_addr) return True except: print('I2C init fail') return False<|docstring|>! @brief iic scan function @return scan result @retval True sensor exist @retval False There is no sensor<|endoftext|>
675e6e6e57ba2980ec7310abfce78e33ad51dcb459a3e3d3f3c3ebd4ae394eb5
def serialize_tagged_sentence(sentence_id, tagged_sentence, state='raw', pretty=False, dump=True): "\n Serialize a sentence tagged with Named entity tags s.t. it can be passed between Luigi tasks.\n \n :param sentence_id: ID of current sentence.\n :type sentence_id: int\n :param tagged_sentence: Tagged sentence.\n :type tagged_sentence: list\n :param state: State that describes the kind of processing that is applied to the data in this step. It's \n included in the metadata of each sentence.\n :type state: str\n :param pretty: Flag to indicate whether the output should be pretty-printed.\n :type pretty: bool\n :param dump: Flag to indicate whether the serialized data should already be dumped.\n :type dump: bool\n :return: Serialized (and maybe dumped) data.\n :rtype: str, dict\n " options = {'ensure_ascii': False} if pretty: options.update({'indent': 4, 'sort_keys': True}) serialized_tagged_sentence = {sentence_id: {'meta': {'id': sentence_id, 'type': 'sentence', 'state': state}, 'data': tagged_sentence}} if dump: return json.dumps(serialized_tagged_sentence, **options) return serialized_tagged_sentence
Serialize a sentence tagged with Named entity tags s.t. it can be passed between Luigi tasks. :param sentence_id: ID of current sentence. :type sentence_id: int :param tagged_sentence: Tagged sentence. :type tagged_sentence: list :param state: State that describes the kind of processing that is applied to the data in this step. It's included in the metadata of each sentence. :type state: str :param pretty: Flag to indicate whether the output should be pretty-printed. :type pretty: bool :param dump: Flag to indicate whether the serialized data should already be dumped. :type dump: bool :return: Serialized (and maybe dumped) data. :rtype: str, dict
backend/bwg/serializing.py
serialize_tagged_sentence
majdigital/bigworldgraph
3
python
def serialize_tagged_sentence(sentence_id, tagged_sentence, state='raw', pretty=False, dump=True): "\n Serialize a sentence tagged with Named entity tags s.t. it can be passed between Luigi tasks.\n \n :param sentence_id: ID of current sentence.\n :type sentence_id: int\n :param tagged_sentence: Tagged sentence.\n :type tagged_sentence: list\n :param state: State that describes the kind of processing that is applied to the data in this step. It's \n included in the metadata of each sentence.\n :type state: str\n :param pretty: Flag to indicate whether the output should be pretty-printed.\n :type pretty: bool\n :param dump: Flag to indicate whether the serialized data should already be dumped.\n :type dump: bool\n :return: Serialized (and maybe dumped) data.\n :rtype: str, dict\n " options = {'ensure_ascii': False} if pretty: options.update({'indent': 4, 'sort_keys': True}) serialized_tagged_sentence = {sentence_id: {'meta': {'id': sentence_id, 'type': 'sentence', 'state': state}, 'data': tagged_sentence}} if dump: return json.dumps(serialized_tagged_sentence, **options) return serialized_tagged_sentence
def serialize_tagged_sentence(sentence_id, tagged_sentence, state='raw', pretty=False, dump=True): "\n Serialize a sentence tagged with Named entity tags s.t. it can be passed between Luigi tasks.\n \n :param sentence_id: ID of current sentence.\n :type sentence_id: int\n :param tagged_sentence: Tagged sentence.\n :type tagged_sentence: list\n :param state: State that describes the kind of processing that is applied to the data in this step. It's \n included in the metadata of each sentence.\n :type state: str\n :param pretty: Flag to indicate whether the output should be pretty-printed.\n :type pretty: bool\n :param dump: Flag to indicate whether the serialized data should already be dumped.\n :type dump: bool\n :return: Serialized (and maybe dumped) data.\n :rtype: str, dict\n " options = {'ensure_ascii': False} if pretty: options.update({'indent': 4, 'sort_keys': True}) serialized_tagged_sentence = {sentence_id: {'meta': {'id': sentence_id, 'type': 'sentence', 'state': state}, 'data': tagged_sentence}} if dump: return json.dumps(serialized_tagged_sentence, **options) return serialized_tagged_sentence<|docstring|>Serialize a sentence tagged with Named entity tags s.t. it can be passed between Luigi tasks. :param sentence_id: ID of current sentence. :type sentence_id: int :param tagged_sentence: Tagged sentence. :type tagged_sentence: list :param state: State that describes the kind of processing that is applied to the data in this step. It's included in the metadata of each sentence. :type state: str :param pretty: Flag to indicate whether the output should be pretty-printed. :type pretty: bool :param dump: Flag to indicate whether the serialized data should already be dumped. :type dump: bool :return: Serialized (and maybe dumped) data. :rtype: str, dict<|endoftext|>
590a7a21751d0875b1adc758ea01fe51b37aea65d798131f01fd1432976559a2
def serialize_dependency_parse_tree(sentence_id, parse_trees, state='raw', pretty=False, dump=True): "\n Serialize a dependency parse tree for a sentence.\n \n :param sentence_id: ID of current sentence.\n :type sentence_id: int\n :param parse_trees: Parse trees for this sentence.\n :type parse_trees: dict, None\n :param state: State that describes the kind of processing that is applied to the data in this step. It's \n included in the metadata of each sentence.\n :type state: str\n :param pretty: Flag to indicate whether the output should be pretty-printed.\n :type pretty: bool\n :param dump: Flag to indicate whether the serialized data should already be dumped.\n :type dump: bool\n :return: Serialized (and maybe dumped) data.\n :rtype: str, dict\n " options = {'ensure_ascii': False} if (type(parse_trees) != dict): if (len(parse_trees) == 0): empty_tree = {sentence_id: {'meta': {'id': sentence_id, 'state': state, 'type': 'sentence'}, 'data': {'root': None, 'nodes': {}}}} if dump: return json.dumps(empty_tree, **options) return empty_tree parse_tree = vars([tree for tree in parse_trees][0]) else: parse_tree = parse_trees simplified_tree = {'root': parse_tree['root']['address'], 'nodes': {int(number): filter_dict(node, DEPENDENCY_TREE_KEEP_FIELDS) for (number, node) in parse_tree['nodes'].items()}} if pretty: options['indent'] = 4 serialized_dependency_parse_tree = {sentence_id: {'meta': {'id': sentence_id, 'state': state, 'type': 'sentence'}, 'data': simplified_tree}} if dump: return json.dumps(serialized_dependency_parse_tree, **options) return serialized_dependency_parse_tree
Serialize a dependency parse tree for a sentence. :param sentence_id: ID of current sentence. :type sentence_id: int :param parse_trees: Parse trees for this sentence. :type parse_trees: dict, None :param state: State that describes the kind of processing that is applied to the data in this step. It's included in the metadata of each sentence. :type state: str :param pretty: Flag to indicate whether the output should be pretty-printed. :type pretty: bool :param dump: Flag to indicate whether the serialized data should already be dumped. :type dump: bool :return: Serialized (and maybe dumped) data. :rtype: str, dict
backend/bwg/serializing.py
serialize_dependency_parse_tree
majdigital/bigworldgraph
3
python
def serialize_dependency_parse_tree(sentence_id, parse_trees, state='raw', pretty=False, dump=True): "\n Serialize a dependency parse tree for a sentence.\n \n :param sentence_id: ID of current sentence.\n :type sentence_id: int\n :param parse_trees: Parse trees for this sentence.\n :type parse_trees: dict, None\n :param state: State that describes the kind of processing that is applied to the data in this step. It's \n included in the metadata of each sentence.\n :type state: str\n :param pretty: Flag to indicate whether the output should be pretty-printed.\n :type pretty: bool\n :param dump: Flag to indicate whether the serialized data should already be dumped.\n :type dump: bool\n :return: Serialized (and maybe dumped) data.\n :rtype: str, dict\n " options = {'ensure_ascii': False} if (type(parse_trees) != dict): if (len(parse_trees) == 0): empty_tree = {sentence_id: {'meta': {'id': sentence_id, 'state': state, 'type': 'sentence'}, 'data': {'root': None, 'nodes': {}}}} if dump: return json.dumps(empty_tree, **options) return empty_tree parse_tree = vars([tree for tree in parse_trees][0]) else: parse_tree = parse_trees simplified_tree = {'root': parse_tree['root']['address'], 'nodes': {int(number): filter_dict(node, DEPENDENCY_TREE_KEEP_FIELDS) for (number, node) in parse_tree['nodes'].items()}} if pretty: options['indent'] = 4 serialized_dependency_parse_tree = {sentence_id: {'meta': {'id': sentence_id, 'state': state, 'type': 'sentence'}, 'data': simplified_tree}} if dump: return json.dumps(serialized_dependency_parse_tree, **options) return serialized_dependency_parse_tree
def serialize_dependency_parse_tree(sentence_id, parse_trees, state='raw', pretty=False, dump=True): "\n Serialize a dependency parse tree for a sentence.\n \n :param sentence_id: ID of current sentence.\n :type sentence_id: int\n :param parse_trees: Parse trees for this sentence.\n :type parse_trees: dict, None\n :param state: State that describes the kind of processing that is applied to the data in this step. It's \n included in the metadata of each sentence.\n :type state: str\n :param pretty: Flag to indicate whether the output should be pretty-printed.\n :type pretty: bool\n :param dump: Flag to indicate whether the serialized data should already be dumped.\n :type dump: bool\n :return: Serialized (and maybe dumped) data.\n :rtype: str, dict\n " options = {'ensure_ascii': False} if (type(parse_trees) != dict): if (len(parse_trees) == 0): empty_tree = {sentence_id: {'meta': {'id': sentence_id, 'state': state, 'type': 'sentence'}, 'data': {'root': None, 'nodes': {}}}} if dump: return json.dumps(empty_tree, **options) return empty_tree parse_tree = vars([tree for tree in parse_trees][0]) else: parse_tree = parse_trees simplified_tree = {'root': parse_tree['root']['address'], 'nodes': {int(number): filter_dict(node, DEPENDENCY_TREE_KEEP_FIELDS) for (number, node) in parse_tree['nodes'].items()}} if pretty: options['indent'] = 4 serialized_dependency_parse_tree = {sentence_id: {'meta': {'id': sentence_id, 'state': state, 'type': 'sentence'}, 'data': simplified_tree}} if dump: return json.dumps(serialized_dependency_parse_tree, **options) return serialized_dependency_parse_tree<|docstring|>Serialize a dependency parse tree for a sentence. :param sentence_id: ID of current sentence. :type sentence_id: int :param parse_trees: Parse trees for this sentence. :type parse_trees: dict, None :param state: State that describes the kind of processing that is applied to the data in this step. It's included in the metadata of each sentence. 
:type state: str :param pretty: Flag to indicate whether the output should be pretty-printed. :type pretty: bool :param dump: Flag to indicate whether the serialized data should already be dumped. :type dump: bool :return: Serialized (and maybe dumped) data. :rtype: str, dict<|endoftext|>
efba0f7834bf2b30f1bffdb97d8dc1a7ca00b40b8e6d7947104edf98ef85470d
def serialize_sentence(sentence_id, sentence, state='raw', pretty=False, dump=True): "\n Serialize a simple sentence.\n\n :param sentence_id: ID of current sentence.\n :type sentence_id: str\n :param sentence: Sentence.\n :type sentence: str\n :param state: State that describes the kind of processing that is applied to the data in this step. It's \n included in the metadata of each sentence.\n :type state: str\n :param pretty: Flag to indicate whether the output should be pretty-printed.\n :type pretty: bool\n :param dump: Flag to indicate whether the serialized data should already be dumped.\n :type dump: bool\n :return: Serialized (and maybe dumped) data.\n :rtype: str, dict\n " options = {'ensure_ascii': False} if pretty: options['indent'] = 4 serialized_sentence = {sentence_id: {'meta': {'id': sentence_id, 'state': state, 'type': 'sentence'}, 'data': sentence}} if dump: return json.dumps(serialized_sentence, **options) return serialized_sentence
Serialize a simple sentence. :param sentence_id: ID of current sentence. :type sentence_id: str :param sentence: Sentence. :type sentence: str :param state: State that describes the kind of processing that is applied to the data in this step. It's included in the metadata of each sentence. :type state: str :param pretty: Flag to indicate whether the output should be pretty-printed. :type pretty: bool :param dump: Flag to indicate whether the serialized data should already be dumped. :type dump: bool :return: Serialized (and maybe dumped) data. :rtype: str, dict
backend/bwg/serializing.py
serialize_sentence
majdigital/bigworldgraph
3
python
def serialize_sentence(sentence_id, sentence, state='raw', pretty=False, dump=True): "\n Serialize a simple sentence.\n\n :param sentence_id: ID of current sentence.\n :type sentence_id: str\n :param sentence: Sentence.\n :type sentence: str\n :param state: State that describes the kind of processing that is applied to the data in this step. It's \n included in the metadata of each sentence.\n :type state: str\n :param pretty: Flag to indicate whether the output should be pretty-printed.\n :type pretty: bool\n :param dump: Flag to indicate whether the serialized data should already be dumped.\n :type dump: bool\n :return: Serialized (and maybe dumped) data.\n :rtype: str, dict\n " options = {'ensure_ascii': False} if pretty: options['indent'] = 4 serialized_sentence = {sentence_id: {'meta': {'id': sentence_id, 'state': state, 'type': 'sentence'}, 'data': sentence}} if dump: return json.dumps(serialized_sentence, **options) return serialized_sentence
def serialize_sentence(sentence_id, sentence, state='raw', pretty=False, dump=True): "\n Serialize a simple sentence.\n\n :param sentence_id: ID of current sentence.\n :type sentence_id: str\n :param sentence: Sentence.\n :type sentence: str\n :param state: State that describes the kind of processing that is applied to the data in this step. It's \n included in the metadata of each sentence.\n :type state: str\n :param pretty: Flag to indicate whether the output should be pretty-printed.\n :type pretty: bool\n :param dump: Flag to indicate whether the serialized data should already be dumped.\n :type dump: bool\n :return: Serialized (and maybe dumped) data.\n :rtype: str, dict\n " options = {'ensure_ascii': False} if pretty: options['indent'] = 4 serialized_sentence = {sentence_id: {'meta': {'id': sentence_id, 'state': state, 'type': 'sentence'}, 'data': sentence}} if dump: return json.dumps(serialized_sentence, **options) return serialized_sentence<|docstring|>Serialize a simple sentence. :param sentence_id: ID of current sentence. :type sentence_id: str :param sentence: Sentence. :type sentence: str :param state: State that describes the kind of processing that is applied to the data in this step. It's included in the metadata of each sentence. :type state: str :param pretty: Flag to indicate whether the output should be pretty-printed. :type pretty: bool :param dump: Flag to indicate whether the serialized data should already be dumped. :type dump: bool :return: Serialized (and maybe dumped) data. :rtype: str, dict<|endoftext|>
19ee35592ab5ff8836c48b6090c0457394664c1c7c003f9778dcc7a23e16b2ba
def serialize_relation(sentence_id, sentence, relations, state='raw', infix='', pretty=False, dump=True): "\n Serialize an extracted relation.\n \n :param sentence_id: ID of current sentence.\n :type sentence_id: str\n :param sentence: Sentence.\n :type sentence: str\n :param relations: Extracted relations.\n :type relations: list\n :param infix: Infix for relation ID.\n :type infix: str\n :param state: State that describes the kind of processing that is applied to the data in this step. It's \n included in the metadata of each sentence.\n :type state: str\n :param pretty: Flag to indicate whether the output should be pretty-printed.\n :type pretty: bool\n :param dump: Flag to indicate whether the serialized data should already be dumped.\n :type dump: bool\n :return: Serialized (and maybe dumped) data.\n :rtype: str, dict\n " options = {'ensure_ascii': False} if pretty: options['indent'] = 4 serialized_relation = {sentence_id: {'meta': {'id': sentence_id, 'state': state, 'type': 'sentence'}, 'data': {'sentence': sentence, 'relations': {'{}/{}{}'.format(sentence_id, infix, str(relation_id).zfill(5)): {'meta': {'id': '{}/{}{}'.format(sentence_id, infix, str(relation_id).zfill(5)), 'state': state, 'type': 'sentence'}, 'data': {'subject_phrase': subj_phrase, 'verb': verb, 'object_phrase': obj_phrase}} for ((subj_phrase, verb, obj_phrase), relation_id) in zip(relations, range(1, (len(relations) + 1)))}}}} if dump: return json.dumps(serialized_relation, **options) return serialized_relation
Serialize an extracted relation. :param sentence_id: ID of current sentence. :type sentence_id: str :param sentence: Sentence. :type sentence: str :param relations: Extracted relations. :type relations: list :param infix: Infix for relation ID. :type infix: str :param state: State that describes the kind of processing that is applied to the data in this step. It's included in the metadata of each sentence. :type state: str :param pretty: Flag to indicate whether the output should be pretty-printed. :type pretty: bool :param dump: Flag to indicate whether the serialized data should already be dumped. :type dump: bool :return: Serialized (and maybe dumped) data. :rtype: str, dict
backend/bwg/serializing.py
serialize_relation
majdigital/bigworldgraph
3
python
def serialize_relation(sentence_id, sentence, relations, state='raw', infix=, pretty=False, dump=True): "\n Serialize an extracted relation.\n \n :param sentence_id: ID of current sentence.\n :type sentence_id: str\n :param sentence: Sentence.\n :type sentence: str\n :param relations: Extracted relations.\n :type relations: list\n :param infix: Infix for relation ID.\n :type infix: str\n :param state: State that describes the kind of processing that is applied to the data in this step. It's \n included in the metadata of each sentence.\n :type state: str\n :param pretty: Flag to indicate whether the output should be pretty-printed.\n :type pretty: bool\n :param dump: Flag to indicate whether the serialized data should already be dumped.\n :type dump: bool\n :return: Serialized (and maybe dumped) data.\n :rtype: str, dict\n " options = {'ensure_ascii': False} if pretty: options['indent'] = 4 serialized_relation = {sentence_id: {'meta': {'id': sentence_id, 'state': state, 'type': 'sentence'}, 'data': {'sentence': sentence, 'relations': {'{}/{}{}'.format(sentence_id, infix, str(relation_id).zfill(5)): {'meta': {'id': '{}/{}{}'.format(sentence_id, infix, str(relation_id).zfill(5)), 'state': state, 'type': 'sentence'}, 'data': {'subject_phrase': subj_phrase, 'verb': verb, 'object_phrase': obj_phrase}} for ((subj_phrase, verb, obj_phrase), relation_id) in zip(relations, range(1, (len(relations) + 1)))}}}} if dump: return json.dumps(serialized_relation, **options) return serialized_relation
def serialize_relation(sentence_id, sentence, relations, state='raw', infix=, pretty=False, dump=True): "\n Serialize an extracted relation.\n \n :param sentence_id: ID of current sentence.\n :type sentence_id: str\n :param sentence: Sentence.\n :type sentence: str\n :param relations: Extracted relations.\n :type relations: list\n :param infix: Infix for relation ID.\n :type infix: str\n :param state: State that describes the kind of processing that is applied to the data in this step. It's \n included in the metadata of each sentence.\n :type state: str\n :param pretty: Flag to indicate whether the output should be pretty-printed.\n :type pretty: bool\n :param dump: Flag to indicate whether the serialized data should already be dumped.\n :type dump: bool\n :return: Serialized (and maybe dumped) data.\n :rtype: str, dict\n " options = {'ensure_ascii': False} if pretty: options['indent'] = 4 serialized_relation = {sentence_id: {'meta': {'id': sentence_id, 'state': state, 'type': 'sentence'}, 'data': {'sentence': sentence, 'relations': {'{}/{}{}'.format(sentence_id, infix, str(relation_id).zfill(5)): {'meta': {'id': '{}/{}{}'.format(sentence_id, infix, str(relation_id).zfill(5)), 'state': state, 'type': 'sentence'}, 'data': {'subject_phrase': subj_phrase, 'verb': verb, 'object_phrase': obj_phrase}} for ((subj_phrase, verb, obj_phrase), relation_id) in zip(relations, range(1, (len(relations) + 1)))}}}} if dump: return json.dumps(serialized_relation, **options) return serialized_relation<|docstring|>Serialize an extracted relation. :param sentence_id: ID of current sentence. :type sentence_id: str :param sentence: Sentence. :type sentence: str :param relations: Extracted relations. :type relations: list :param infix: Infix for relation ID. :type infix: str :param state: State that describes the kind of processing that is applied to the data in this step. It's included in the metadata of each sentence. 
:type state: str :param pretty: Flag to indicate whether the output should be pretty-printed. :type pretty: bool :param dump: Flag to indicate whether the serialized data should already be dumped. :type dump: bool :return: Serialized (and maybe dumped) data. :rtype: str, dict<|endoftext|>
b71b718a73f9a704e1cfce03413cf0c2fd429a8f1415a5f4564f6b6a58b4562f
def serialize_wikidata_entity(sentence_id, wikidata_entities, infix='', state='raw', pretty=False, dump=True): "\n Serialize relevant information of a Wikidata entity.\n \n :param sentence_id: ID of current sentence.\n :type sentence_id: int\n :param wikidata_entities: List of Wikidata entities in a certain sentence.\n :type wikidata_entities: list\n :param infix: Infix for Wikidata entity ID.\n :type infix: str\n :param state: State that describes the kind of processing that is applied to the data in this step. It's \n included in the metadata of each sentence.\n :type state: str\n :param pretty: Flag to indicate whether the output should be pretty-printed.\n :type pretty: bool\n :param dump: Flag to indicate whether the serialized data should already be dumped.\n :type dump: bool\n :return: Serialized (and maybe dumped) data.\n :rtype: str, dict\n " options = {'ensure_ascii': False} if pretty: options['indent'] = 4 serialized_entity = {sentence_id: {'meta': {'id': sentence_id, 'state': state, 'type': 'wikidata_entity'}, 'data': {'entities': {'{}/{}{}'.format(sentence_id, infix, str(wd_entity_id).zfill(5)): {'meta': {'id': '{}/{}{}'.format(sentence_id, infix, str(wd_entity_id).zfill(5)), 'state': state, 'type': ('Wikidata entity' if (len(wd_entity) == 1) else 'Ambiguous Wikidata entity')}, 'data': wd_entity} for (wd_entity, wd_entity_id) in zip(wikidata_entities, range(1, (len(wikidata_entities) + 1)))}}}} if dump: return json.dumps(serialized_entity, **options) return serialized_entity
Serialize relevant information of a Wikidata entity. :param sentence_id: ID of current sentence. :type sentence_id: int :param wikidata_entities: List of Wikidata entities in a certain sentence. :type wikidata_entities: list :param infix: Infix for Wikidata entity ID. :type infix: str :param state: State that describes the kind of processing that is applied to the data in this step. It's included in the metadata of each sentence. :type state: str :param pretty: Flag to indicate whether the output should be pretty-printed. :type pretty: bool :param dump: Flag to indicate whether the serialized data should already be dumped. :type dump: bool :return: Serialized (and maybe dumped) data. :rtype: str, dict
backend/bwg/serializing.py
serialize_wikidata_entity
majdigital/bigworldgraph
3
python
def serialize_wikidata_entity(sentence_id, wikidata_entities, infix=, state='raw', pretty=False, dump=True): "\n Serialize relevant information of a Wikidata entity.\n \n :param sentence_id: ID of current sentence.\n :type sentence_id: int\n :param wikidata_entities: List of Wikidata entities in a certain sentence.\n :type wikidata_entities: list\n :param infix: Infix for Wikidata entity ID.\n :type infix: str\n :param state: State that describes the kind of processing that is applied to the data in this step. It's \n included in the metadata of each sentence.\n :type state: str\n :param pretty: Flag to indicate whether the output should be pretty-printed.\n :type pretty: bool\n :param dump: Flag to indicate whether the serialized data should already be dumped.\n :type dump: bool\n :return: Serialized (and maybe dumped) data.\n :rtype: str, dict\n " options = {'ensure_ascii': False} if pretty: options['indent'] = 4 serialized_entity = {sentence_id: {'meta': {'id': sentence_id, 'state': state, 'type': 'wikidata_entity'}, 'data': {'entities': {'{}/{}{}'.format(sentence_id, infix, str(wd_entity_id).zfill(5)): {'meta': {'id': '{}/{}{}'.format(sentence_id, infix, str(wd_entity_id).zfill(5)), 'state': state, 'type': ('Wikidata entity' if (len(wd_entity) == 1) else 'Ambiguous Wikidata entity')}, 'data': wd_entity} for (wd_entity, wd_entity_id) in zip(wikidata_entities, range(1, (len(wikidata_entities) + 1)))}}}} if dump: return json.dumps(serialized_entity, **options) return serialized_entity
def serialize_wikidata_entity(sentence_id, wikidata_entities, infix=, state='raw', pretty=False, dump=True): "\n Serialize relevant information of a Wikidata entity.\n \n :param sentence_id: ID of current sentence.\n :type sentence_id: int\n :param wikidata_entities: List of Wikidata entities in a certain sentence.\n :type wikidata_entities: list\n :param infix: Infix for Wikidata entity ID.\n :type infix: str\n :param state: State that describes the kind of processing that is applied to the data in this step. It's \n included in the metadata of each sentence.\n :type state: str\n :param pretty: Flag to indicate whether the output should be pretty-printed.\n :type pretty: bool\n :param dump: Flag to indicate whether the serialized data should already be dumped.\n :type dump: bool\n :return: Serialized (and maybe dumped) data.\n :rtype: str, dict\n " options = {'ensure_ascii': False} if pretty: options['indent'] = 4 serialized_entity = {sentence_id: {'meta': {'id': sentence_id, 'state': state, 'type': 'wikidata_entity'}, 'data': {'entities': {'{}/{}{}'.format(sentence_id, infix, str(wd_entity_id).zfill(5)): {'meta': {'id': '{}/{}{}'.format(sentence_id, infix, str(wd_entity_id).zfill(5)), 'state': state, 'type': ('Wikidata entity' if (len(wd_entity) == 1) else 'Ambiguous Wikidata entity')}, 'data': wd_entity} for (wd_entity, wd_entity_id) in zip(wikidata_entities, range(1, (len(wikidata_entities) + 1)))}}}} if dump: return json.dumps(serialized_entity, **options) return serialized_entity<|docstring|>Serialize relevant information of a Wikidata entity. :param sentence_id: ID of current sentence. :type sentence_id: int :param wikidata_entities: List of Wikidata entities in a certain sentence. :type wikidata_entities: list :param infix: Infix for Wikidata entity ID. :type infix: str :param state: State that describes the kind of processing that is applied to the data in this step. It's included in the metadata of each sentence. 
:type state: str :param pretty: Flag to indicate whether the output should be pretty-printed. :type pretty: bool :param dump: Flag to indicate whether the serialized data should already be dumped. :type dump: bool :return: Serialized (and maybe dumped) data. :rtype: str, dict<|endoftext|>
a47933f74d0a28d84aa61dbdfe2425ea62af9294099547a43a7cbb567c797ebe
def serialize_article(article_id, article_url, article_title, sentences, state='raw', from_scratch=True, pretty=False, dump=True): "\n Serialize a Wikipedia article.\n \n :param article_id: ID of current article.\n :type article_id: str\n :param article_url: URL of current article.\n :type article_url: str\n :param article_title: Title of current article.\n :type article_title: str\n :param sentences: Sentences of current article.\n :type sentences: list, dict\n :param state: State that describes the kind of processing that is applied to the data in this step. It's \n included in the metadata of each sentence.\n :type state: str\n :param from_scratch: Flag to indicate whether sentences IDs and metadata should be created from scratch.\n :type from_scratch: bool\n :param pretty: Flag to indicate whether the output should be pretty-printed.\n :type pretty: bool\n :param dump: Flag to indicate whether the serialized data should already be dumped.\n :type dump: bool\n :return: Serialized (and maybe dumped) data.\n :rtype: str, dict\n " options = {'ensure_ascii': False} if pretty: options['indent'] = 4 if from_scratch: sentences = {'{}/{}'.format(article_id, str(sentence_id).zfill(5)): {'meta': {'id': '{}/{}'.format(article_id, str(sentence_id).zfill(5)), 'type': 'sentence', 'state': state}, 'data': sentence} for (sentence, sentence_id) in zip(sentences, range(1, (len(sentences) + 1)))} serialized_article = {'meta': {'id': article_id, 'url': article_url, 'title': article_title, 'type': 'article', 'state': state}, 'data': sentences} if dump: return json.dumps(serialized_article, **options) return serialized_article
Serialize a Wikipedia article. :param article_id: ID of current article. :type article_id: str :param article_url: URL of current article. :type article_url: str :param article_title: Title of current article. :type article_title: str :param sentences: Sentences of current article. :type sentences: list, dict :param state: State that describes the kind of processing that is applied to the data in this step. It's included in the metadata of each sentence. :type state: str :param from_scratch: Flag to indicate whether sentences IDs and metadata should be created from scratch. :type from_scratch: bool :param pretty: Flag to indicate whether the output should be pretty-printed. :type pretty: bool :param dump: Flag to indicate whether the serialized data should already be dumped. :type dump: bool :return: Serialized (and maybe dumped) data. :rtype: str, dict
backend/bwg/serializing.py
serialize_article
majdigital/bigworldgraph
3
python
def serialize_article(article_id, article_url, article_title, sentences, state='raw', from_scratch=True, pretty=False, dump=True): "\n Serialize a Wikipedia article.\n \n :param article_id: ID of current article.\n :type article_id: str\n :param article_url: URL of current article.\n :type article_url: str\n :param article_title: Title of current article.\n :type article_title: str\n :param sentences: Sentences of current article.\n :type sentences: list, dict\n :param state: State that describes the kind of processing that is applied to the data in this step. It's \n included in the metadata of each sentence.\n :type state: str\n :param from_scratch: Flag to indicate whether sentences IDs and metadata should be created from scratch.\n :type from_scratch: bool\n :param pretty: Flag to indicate whether the output should be pretty-printed.\n :type pretty: bool\n :param dump: Flag to indicate whether the serialized data should already be dumped.\n :type dump: bool\n :return: Serialized (and maybe dumped) data.\n :rtype: str, dict\n " options = {'ensure_ascii': False} if pretty: options['indent'] = 4 if from_scratch: sentences = {'{}/{}'.format(article_id, str(sentence_id).zfill(5)): {'meta': {'id': '{}/{}'.format(article_id, str(sentence_id).zfill(5)), 'type': 'sentence', 'state': state}, 'data': sentence} for (sentence, sentence_id) in zip(sentences, range(1, (len(sentences) + 1)))} serialized_article = {'meta': {'id': article_id, 'url': article_url, 'title': article_title, 'type': 'article', 'state': state}, 'data': sentences} if dump: return json.dumps(serialized_article, **options) return serialized_article
def serialize_article(article_id, article_url, article_title, sentences, state='raw', from_scratch=True, pretty=False, dump=True): "\n Serialize a Wikipedia article.\n \n :param article_id: ID of current article.\n :type article_id: str\n :param article_url: URL of current article.\n :type article_url: str\n :param article_title: Title of current article.\n :type article_title: str\n :param sentences: Sentences of current article.\n :type sentences: list, dict\n :param state: State that describes the kind of processing that is applied to the data in this step. It's \n included in the metadata of each sentence.\n :type state: str\n :param from_scratch: Flag to indicate whether sentences IDs and metadata should be created from scratch.\n :type from_scratch: bool\n :param pretty: Flag to indicate whether the output should be pretty-printed.\n :type pretty: bool\n :param dump: Flag to indicate whether the serialized data should already be dumped.\n :type dump: bool\n :return: Serialized (and maybe dumped) data.\n :rtype: str, dict\n " options = {'ensure_ascii': False} if pretty: options['indent'] = 4 if from_scratch: sentences = {'{}/{}'.format(article_id, str(sentence_id).zfill(5)): {'meta': {'id': '{}/{}'.format(article_id, str(sentence_id).zfill(5)), 'type': 'sentence', 'state': state}, 'data': sentence} for (sentence, sentence_id) in zip(sentences, range(1, (len(sentences) + 1)))} serialized_article = {'meta': {'id': article_id, 'url': article_url, 'title': article_title, 'type': 'article', 'state': state}, 'data': sentences} if dump: return json.dumps(serialized_article, **options) return serialized_article<|docstring|>Serialize a Wikipedia article. :param article_id: ID of current article. :type article_id: str :param article_url: URL of current article. :type article_url: str :param article_title: Title of current article. :type article_title: str :param sentences: Sentences of current article. 
:type sentences: list, dict :param state: State that describes the kind of processing that is applied to the data in this step. It's included in the metadata of each sentence. :type state: str :param from_scratch: Flag to indicate whether sentences IDs and metadata should be created from scratch. :type from_scratch: bool :param pretty: Flag to indicate whether the output should be pretty-printed. :type pretty: bool :param dump: Flag to indicate whether the serialized data should already be dumped. :type dump: bool :return: Serialized (and maybe dumped) data. :rtype: str, dict<|endoftext|>
c00c3a5d9393a65b0660dab550994c9f85e4a3bbc6db05214bf1b3dfd09f330e
def get_nes_from_sentence(sentence_data, default_ne_tag, include_tag=False): '\n Extract all named entities from a named entity tagged sentence.\n \n :param sentence_data: Tagged sentence data.\n :type sentence_data: list\n :param default_ne_tag: Default Named Entity tag for words that are not Named Entities.\n :type default_ne_tag: str\n :param include_tag: Flag to indicate whether the Named entity tag should be included.\n :type include_tag: bool\n :return: Extracted Named Entities.\n :rtype: list\n ' def add_ne(nes_, current_ne_, current_ne_tag_, include_tag_): ne = ' '.join(current_ne_) to_add = (ne if (not include_tag_) else (ne, current_ne_tag_)) nes_.append(to_add) return nes_ nes = [] current_ne = [] current_ne_tag = '' for (word, ne_tag) in sentence_data: if ((ne_tag != default_ne_tag) and (not current_ne)): current_ne.append(word) current_ne_tag = ne_tag elif ((ne_tag == default_ne_tag) and current_ne): nes = add_ne(nes, current_ne, current_ne_tag, include_tag) current_ne = [] current_ne_tag = '' elif ((ne_tag != default_ne_tag) and current_ne): if (current_ne_tag == ne_tag): current_ne.append(word) else: nes = add_ne(nes, current_ne, current_ne_tag, include_tag) current_ne = [word] current_ne_tag = ne_tag return nes
Extract all named entities from a named entity tagged sentence. :param sentence_data: Tagged sentence data. :type sentence_data: list :param default_ne_tag: Default Named Entity tag for words that are not Named Entities. :type default_ne_tag: str :param include_tag: Flag to indicate whether the Named entity tag should be included. :type include_tag: bool :return: Extracted Named Entities. :rtype: list
backend/bwg/serializing.py
get_nes_from_sentence
majdigital/bigworldgraph
3
python
def get_nes_from_sentence(sentence_data, default_ne_tag, include_tag=False): '\n Extract all named entities from a named entity tagged sentence.\n \n :param sentence_data: Tagged sentence data.\n :type sentence_data: list\n :param default_ne_tag: Default Named Entity tag for words that are not Named Entities.\n :type default_ne_tag: str\n :param include_tag: Flag to indicate whether the Named entity tag should be included.\n :type include_tag: bool\n :return: Extracted Named Entities.\n :rtype: list\n ' def add_ne(nes_, current_ne_, current_ne_tag_, include_tag_): ne = ' '.join(current_ne_) to_add = (ne if (not include_tag_) else (ne, current_ne_tag_)) nes_.append(to_add) return nes_ nes = [] current_ne = [] current_ne_tag = for (word, ne_tag) in sentence_data: if ((ne_tag != default_ne_tag) and (not current_ne)): current_ne.append(word) current_ne_tag = ne_tag elif ((ne_tag == default_ne_tag) and current_ne): nes = add_ne(nes, current_ne, current_ne_tag, include_tag) current_ne = [] current_ne_tag = elif ((ne_tag != default_ne_tag) and current_ne): if (current_ne_tag == ne_tag): current_ne.append(word) else: nes = add_ne(nes, current_ne, current_ne_tag, include_tag) current_ne = [word] current_ne_tag = ne_tag return nes
def get_nes_from_sentence(sentence_data, default_ne_tag, include_tag=False): '\n Extract all named entities from a named entity tagged sentence.\n \n :param sentence_data: Tagged sentence data.\n :type sentence_data: list\n :param default_ne_tag: Default Named Entity tag for words that are not Named Entities.\n :type default_ne_tag: str\n :param include_tag: Flag to indicate whether the Named entity tag should be included.\n :type include_tag: bool\n :return: Extracted Named Entities.\n :rtype: list\n ' def add_ne(nes_, current_ne_, current_ne_tag_, include_tag_): ne = ' '.join(current_ne_) to_add = (ne if (not include_tag_) else (ne, current_ne_tag_)) nes_.append(to_add) return nes_ nes = [] current_ne = [] current_ne_tag = for (word, ne_tag) in sentence_data: if ((ne_tag != default_ne_tag) and (not current_ne)): current_ne.append(word) current_ne_tag = ne_tag elif ((ne_tag == default_ne_tag) and current_ne): nes = add_ne(nes, current_ne, current_ne_tag, include_tag) current_ne = [] current_ne_tag = elif ((ne_tag != default_ne_tag) and current_ne): if (current_ne_tag == ne_tag): current_ne.append(word) else: nes = add_ne(nes, current_ne, current_ne_tag, include_tag) current_ne = [word] current_ne_tag = ne_tag return nes<|docstring|>Extract all named entities from a named entity tagged sentence. :param sentence_data: Tagged sentence data. :type sentence_data: list :param default_ne_tag: Default Named Entity tag for words that are not Named Entities. :type default_ne_tag: str :param include_tag: Flag to indicate whether the Named entity tag should be included. :type include_tag: bool :return: Extracted Named Entities. :rtype: list<|endoftext|>
d13089261d5276a589f240f9a05a79345a88e30e6f7a336dc5cb26029ef29443
def just_dump(json_object, pretty=False): '\n Self-documenting?\n \n :param json_object: Serialized JSON object\n :type json_object: dict\n :param pretty: Prettify the JSON object.\n :type pretty: bool\n :return: Dumped JSON object.\n :rtype: str\n ' options = {'ensure_ascii': False} if pretty: options['indent'] = 4 return json.dumps(json_object, **options)
Self-documenting? :param json_object: Serialized JSON object :type json_object: dict :param pretty: Prettify the JSON object. :type pretty: bool :return: Dumped JSON object. :rtype: str
backend/bwg/serializing.py
just_dump
majdigital/bigworldgraph
3
python
def just_dump(json_object, pretty=False): '\n Self-documenting?\n \n :param json_object: Serialized JSON object\n :type json_object: dict\n :param pretty: Prettify the JSON object.\n :type pretty: bool\n :return: Dumped JSON object.\n :rtype: str\n ' options = {'ensure_ascii': False} if pretty: options['indent'] = 4 return json.dumps(json_object, **options)
def just_dump(json_object, pretty=False): '\n Self-documenting?\n \n :param json_object: Serialized JSON object\n :type json_object: dict\n :param pretty: Prettify the JSON object.\n :type pretty: bool\n :return: Dumped JSON object.\n :rtype: str\n ' options = {'ensure_ascii': False} if pretty: options['indent'] = 4 return json.dumps(json_object, **options)<|docstring|>Self-documenting? :param json_object: Serialized JSON object :type json_object: dict :param pretty: Prettify the JSON object. :type pretty: bool :return: Dumped JSON object. :rtype: str<|endoftext|>
03480b278552223818b4526fca153cdfe79ea4f4c27d14eca892dce9fdac1ed0
def deserialize_line(line, encoding='utf-8'):
    """
    Transform a line in a file that was created as a result from a Luigi task
    into its metadata and main data.

    :param line: Line to be deserialized (str, or bytes in ``encoding``).
    :type line: str
    :param encoding: Encoding used when ``line`` is given as bytes (default is utf-8).
    :type encoding: str
    :return: Deserialized JSON data.
    :rtype: dict
    """
    # Bug fix: ``json.loads`` dropped its ``encoding`` keyword in Python 3.9
    # (it had been ignored since 3.1), so passing it through raises TypeError
    # on modern interpreters.  Decode bytes input manually instead.
    if isinstance(line, (bytes, bytearray)):
        line = line.decode(encoding)
    return json.loads(line)
Transform a line in a file that was created as a result from a Luigi task into its metadata and main data. :param line: Line to be serialized. :type line: str :param encoding: Encoding of line (default is utf-8). :type encoding: str
backend/bwg/serializing.py
deserialize_line
majdigital/bigworldgraph
3
python
def deserialize_line(line, encoding='utf-8'): '\n Transform a line in a file that was created as a result from a Luigi task into its metadata and main data.\n \n :param line: Line to be serialized.\n :type line: str\n :param encoding: Encoding of line (default is utf-8).\n :type encoding: str\n ' return json.loads(line, encoding=encoding)
def deserialize_line(line, encoding='utf-8'): '\n Transform a line in a file that was created as a result from a Luigi task into its metadata and main data.\n \n :param line: Line to be serialized.\n :type line: str\n :param encoding: Encoding of line (default is utf-8).\n :type encoding: str\n ' return json.loads(line, encoding=encoding)<|docstring|>Transform a line in a file that was created as a result from a Luigi task into its metadata and main data. :param line: Line to be serialized. :type line: str :param encoding: Encoding of line (default is utf-8). :type encoding: str<|endoftext|>
ef2027d8f1d4c47293b85ff71b3df8415de34b12d419f7eb05578a46531c9906
def retry_with_fallback(triggering_error, **fallback_kwargs):
    """
    Rerun a function in case a specific error occurs with new arguments.

    :param triggering_error: Error class that triggers the decorator to re-run the function.
    :type triggering_error: Exception
    :param fallback_kwargs: Fallback named arguments that are applied when the function is re-run.
    :type fallback_kwargs: dict
    :return: Decorator.
    :rtype: func
    """
    def decorator(func):
        """Actual decorator."""
        @functools.wraps(func)
        def func_wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except triggering_error:
                # Retry exactly once, with the fallback named arguments merged in.
                kwargs.update(fallback_kwargs)
                return func(*args, **kwargs)
        return func_wrapper
    return decorator
Rerun a function in case a specific error occurs with new arguments. :param triggering_error: Error class that triggers the decorator to re-run the function. :type triggering_error: Exception :param fallback_kwargs: Fallback named arguments that are applied when the function is re-run. :type fallback_kwargs: dict :return: Decorator. :rtype: func
backend/bwg/serializing.py
retry_with_fallback
majdigital/bigworldgraph
3
python
def retry_with_fallback(triggering_error, **fallback_kwargs): '\n Rerun a function in case a specific error occurs with new arguments.\n \n :param triggering_error: Error class that triggers the decorator to re-run the function.\n :type triggering_error: Exception\n :param fallback_kwargs: Fallback named arguments that are applied when the function is re-run.\n :type fallback_kwargs: dict\n :return: Decorator.\n :rtype: func\n ' def decorator(func): '\n Actual decorator\n ' @functools.wraps(func) def func_wrapper(*args, **kwargs): try: function_result = func(*args, **kwargs) except triggering_error: kwargs.update(fallback_kwargs) function_result = func(*args, **kwargs) return function_result return func_wrapper return decorator
def retry_with_fallback(triggering_error, **fallback_kwargs): '\n Rerun a function in case a specific error occurs with new arguments.\n \n :param triggering_error: Error class that triggers the decorator to re-run the function.\n :type triggering_error: Exception\n :param fallback_kwargs: Fallback named arguments that are applied when the function is re-run.\n :type fallback_kwargs: dict\n :return: Decorator.\n :rtype: func\n ' def decorator(func): '\n Actual decorator\n ' @functools.wraps(func) def func_wrapper(*args, **kwargs): try: function_result = func(*args, **kwargs) except triggering_error: kwargs.update(fallback_kwargs) function_result = func(*args, **kwargs) return function_result return func_wrapper return decorator<|docstring|>Rerun a function in case a specific error occurs with new arguments. :param triggering_error: Error class that triggers the decorator to re-run the function. :type triggering_error: Exception :param fallback_kwargs: Fallback named arguments that are applied when the function is re-run. :type fallback_kwargs: dict :return: Decorator. :rtype: func<|endoftext|>
08282e7543a4dcb50227b06f3e247347a5dfdc522b9dc4b73195e41f2827f6a6
def decorator(func):
    """
    Actual decorator: wrap ``func`` so that it is retried once on failure.

    NOTE(review): relies on ``triggering_error`` and ``fallback_kwargs`` being
    free variables from an enclosing scope (see ``retry_with_fallback``).
    """
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        try:
            # First attempt with the caller-supplied arguments.
            function_result = func(*args, **kwargs)
        except triggering_error:
            # Retry once with the fallback named arguments merged in.
            kwargs.update(fallback_kwargs)
            function_result = func(*args, **kwargs)
        return function_result
    return func_wrapper
Actual decorator
backend/bwg/serializing.py
decorator
majdigital/bigworldgraph
3
python
def decorator(func): '\n \n ' @functools.wraps(func) def func_wrapper(*args, **kwargs): try: function_result = func(*args, **kwargs) except triggering_error: kwargs.update(fallback_kwargs) function_result = func(*args, **kwargs) return function_result return func_wrapper
def decorator(func): '\n \n ' @functools.wraps(func) def func_wrapper(*args, **kwargs): try: function_result = func(*args, **kwargs) except triggering_error: kwargs.update(fallback_kwargs) function_result = func(*args, **kwargs) return function_result return func_wrapper<|docstring|>Actual decorator<|endoftext|>
65a047f88cd27a398c4728092c9bae8535a16dce574ec0e8208bdd35f449d656
def deep_merge(destination: dict, source: dict, concat_lists: bool = False) -> dict:
    """Recursively add all keys from `source` into `destination`.

    The input ``destination`` is not mutated; a deep copy is merged into.

    Example:
        >>> destination = {'a': 1, 'b': {'c': 3}}
        >>> source = {'a': 11, 'b': {'d': 44}, 'e': 55}
        >>> deep_merge(destination, source)
        {'a': 11, 'b': {'c': 3, 'd': 44}, 'e': 55}
    """
    def _merge_into(target, overlay):
        for key, new_value in overlay.items():
            old_value = target.get(key)
            if isinstance(old_value, dict) and isinstance(new_value, dict):
                # Both sides are dicts: merge recursively.
                target[key] = _merge_into(old_value, new_value)
            elif concat_lists and isinstance(old_value, list) and isinstance(new_value, list):
                # Both sides are lists and concatenation was requested.
                old_value.extend(new_value)
            else:
                # Scalars (or mismatched types): the overlay wins.
                target[key] = new_value
        return target

    return _merge_into(copy.deepcopy(destination), source)
Recursively add all keys from `source` into `destination`. Example: >>> destination = {'a': 1, 'b': {'c': 3}} >>> source = {'a': 11, 'b': {'d': 44}, 'e': 55} >>> deep_merge(destination, source) {'a': 11, 'b': {'c': 3, 'd': 44}, 'e': 55}
nestor_api/utils/dict.py
deep_merge
ChauffeurPrive/nestor-api
2
python
def deep_merge(destination: dict, source: dict, concat_lists: bool=False) -> dict: "Recursively add all keys from `source` into `destination`.\n\n Example:\n >>> destination = {'a': 1, 'b': {'c': 3}}\n >>> source = {'a': 11, 'b': {'d': 44}, 'e': 55}\n >>> deep_merge(destination, source)\n {'a': 11, 'b': {'c': 3, 'd': 44}, 'e': 55}\n " def _deep_merge_rec(dest, src): for key in src: if (isinstance(dest.get(key), dict) and isinstance(src[key], dict)): dest[key] = _deep_merge_rec(dest[key], src[key]) elif (isinstance(dest.get(key), list) and isinstance(src[key], list) and concat_lists): dest[key].extend(src[key]) else: dest[key] = src[key] return dest return _deep_merge_rec(copy.deepcopy(destination), source)
def deep_merge(destination: dict, source: dict, concat_lists: bool=False) -> dict: "Recursively add all keys from `source` into `destination`.\n\n Example:\n >>> destination = {'a': 1, 'b': {'c': 3}}\n >>> source = {'a': 11, 'b': {'d': 44}, 'e': 55}\n >>> deep_merge(destination, source)\n {'a': 11, 'b': {'c': 3, 'd': 44}, 'e': 55}\n " def _deep_merge_rec(dest, src): for key in src: if (isinstance(dest.get(key), dict) and isinstance(src[key], dict)): dest[key] = _deep_merge_rec(dest[key], src[key]) elif (isinstance(dest.get(key), list) and isinstance(src[key], list) and concat_lists): dest[key].extend(src[key]) else: dest[key] = src[key] return dest return _deep_merge_rec(copy.deepcopy(destination), source)<|docstring|>Recursively add all keys from `source` into `destination`. Example: >>> destination = {'a': 1, 'b': {'c': 3}} >>> source = {'a': 11, 'b': {'d': 44}, 'e': 55} >>> deep_merge(destination, source) {'a': 11, 'b': {'c': 3, 'd': 44}, 'e': 55}<|endoftext|>
55d90326786f333453d9ce8b042ff04f8e800706938450040c00263aad3beb2c
def expand_dim(t, dim, k):
    """
    Expand tensor ``t`` along dimension ``dim`` to size ``k``.

    All other dimensions keep their size (``-1`` in ``torch.Tensor.expand``).
    Note that ``expand`` only broadcasts singleton dimensions, so
    ``t.shape[dim]`` must be 1 (or already ``k``); the result is a view of
    ``t``, not a copy.
    """
    target_shape = [-1] * t.dim()
    target_shape[dim] = k
    # ``expand`` returns a new view with the singleton dimension broadcast to k.
    return t.expand(*target_shape)
This fucntion tiles/expands a function expands a given tensor (t) by an amount k eg if t=(10,8), k=3; then if dim=0, t=(30,8) ; if dim=1, t=(10,24)
src/bilinear_sparse_routing_amp.py
expand_dim
Sharut/Scalable-Capsules
0
python
def expand_dim(t, dim, k): '\n This fucntion tiles/expands a function expands a given tensor (t)\n by an amount k\n eg if t=(10,8), k=3; then if dim=0, t=(30,8) ; if dim=1, t=(10,24)\n ' expand_shape = ([(- 1)] * len(t.shape)) expand_shape[dim] = k '\n expand: Returns a new view of the self tensor with singleton \n dimensions expanded to a larger size. Like tile, it just\n makes multiple copies of current tensor and concatenates in desired shape\n ' return t.expand(*expand_shape)
def expand_dim(t, dim, k): '\n This fucntion tiles/expands a function expands a given tensor (t)\n by an amount k\n eg if t=(10,8), k=3; then if dim=0, t=(30,8) ; if dim=1, t=(10,24)\n ' expand_shape = ([(- 1)] * len(t.shape)) expand_shape[dim] = k '\n expand: Returns a new view of the self tensor with singleton \n dimensions expanded to a larger size. Like tile, it just\n makes multiple copies of current tensor and concatenates in desired shape\n ' return t.expand(*expand_shape)<|docstring|>This fucntion tiles/expands a function expands a given tensor (t) by an amount k eg if t=(10,8), k=3; then if dim=0, t=(30,8) ; if dim=1, t=(10,24)<|endoftext|>
6725f09cd05c0138f3b353f2ae096f8fb56eeaeaf9231d17e9c7c86ee16378df
def __init__(__self__, *, resource_group_name: pulumi.Input[str], job_collection_name: Optional[pulumi.Input[str]]=None, location: Optional[pulumi.Input[str]]=None, name: Optional[pulumi.Input[str]]=None, properties: Optional[pulumi.Input['JobCollectionPropertiesArgs']]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None):
    """
    The set of arguments for constructing a JobCollection resource.
    :param pulumi.Input[str] resource_group_name: The resource group name.
    :param pulumi.Input[str] job_collection_name: The job collection name.
    :param pulumi.Input[str] location: Gets or sets the storage account location.
    :param pulumi.Input[str] name: Gets or sets the job collection resource name.
    :param pulumi.Input['JobCollectionPropertiesArgs'] properties: Gets or sets the job collection properties.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Gets or sets the tags.
    """
    # The resource group is the only required argument.
    pulumi.set(__self__, 'resource_group_name', resource_group_name)
    # Optional arguments are only recorded when explicitly provided.
    optional_args = {
        'job_collection_name': job_collection_name,
        'location': location,
        'name': name,
        'properties': properties,
        'tags': tags,
    }
    for arg_name, arg_value in optional_args.items():
        if arg_value is not None:
            pulumi.set(__self__, arg_name, arg_value)
The set of arguments for constructing a JobCollection resource. :param pulumi.Input[str] resource_group_name: The resource group name. :param pulumi.Input[str] job_collection_name: The job collection name. :param pulumi.Input[str] location: Gets or sets the storage account location. :param pulumi.Input[str] name: Gets or sets the job collection resource name. :param pulumi.Input['JobCollectionPropertiesArgs'] properties: Gets or sets the job collection properties. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Gets or sets the tags.
sdk/python/pulumi_azure_native/scheduler/job_collection.py
__init__
sebtelko/pulumi-azure-native
0
python
def __init__(__self__, *, resource_group_name: pulumi.Input[str], job_collection_name: Optional[pulumi.Input[str]]=None, location: Optional[pulumi.Input[str]]=None, name: Optional[pulumi.Input[str]]=None, properties: Optional[pulumi.Input['JobCollectionPropertiesArgs']]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None): "\n The set of arguments for constructing a JobCollection resource.\n :param pulumi.Input[str] resource_group_name: The resource group name.\n :param pulumi.Input[str] job_collection_name: The job collection name.\n :param pulumi.Input[str] location: Gets or sets the storage account location.\n :param pulumi.Input[str] name: Gets or sets the job collection resource name.\n :param pulumi.Input['JobCollectionPropertiesArgs'] properties: Gets or sets the job collection properties.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Gets or sets the tags.\n " pulumi.set(__self__, 'resource_group_name', resource_group_name) if (job_collection_name is not None): pulumi.set(__self__, 'job_collection_name', job_collection_name) if (location is not None): pulumi.set(__self__, 'location', location) if (name is not None): pulumi.set(__self__, 'name', name) if (properties is not None): pulumi.set(__self__, 'properties', properties) if (tags is not None): pulumi.set(__self__, 'tags', tags)
def __init__(__self__, *, resource_group_name: pulumi.Input[str], job_collection_name: Optional[pulumi.Input[str]]=None, location: Optional[pulumi.Input[str]]=None, name: Optional[pulumi.Input[str]]=None, properties: Optional[pulumi.Input['JobCollectionPropertiesArgs']]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None): "\n The set of arguments for constructing a JobCollection resource.\n :param pulumi.Input[str] resource_group_name: The resource group name.\n :param pulumi.Input[str] job_collection_name: The job collection name.\n :param pulumi.Input[str] location: Gets or sets the storage account location.\n :param pulumi.Input[str] name: Gets or sets the job collection resource name.\n :param pulumi.Input['JobCollectionPropertiesArgs'] properties: Gets or sets the job collection properties.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Gets or sets the tags.\n " pulumi.set(__self__, 'resource_group_name', resource_group_name) if (job_collection_name is not None): pulumi.set(__self__, 'job_collection_name', job_collection_name) if (location is not None): pulumi.set(__self__, 'location', location) if (name is not None): pulumi.set(__self__, 'name', name) if (properties is not None): pulumi.set(__self__, 'properties', properties) if (tags is not None): pulumi.set(__self__, 'tags', tags)<|docstring|>The set of arguments for constructing a JobCollection resource. :param pulumi.Input[str] resource_group_name: The resource group name. :param pulumi.Input[str] job_collection_name: The job collection name. :param pulumi.Input[str] location: Gets or sets the storage account location. :param pulumi.Input[str] name: Gets or sets the job collection resource name. :param pulumi.Input['JobCollectionPropertiesArgs'] properties: Gets or sets the job collection properties. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Gets or sets the tags.<|endoftext|>
fc3a6299de8c937ab8ea3daea15d00f35d30647adde5fa4ac8c982cd6707171e
@property
@pulumi.getter(name='resourceGroupName')
def resource_group_name(self) -> pulumi.Input[str]:
    """The resource group name."""
    value = pulumi.get(self, 'resource_group_name')
    return value
The resource group name.
sdk/python/pulumi_azure_native/scheduler/job_collection.py
resource_group_name
sebtelko/pulumi-azure-native
0
python
@property @pulumi.getter(name='resourceGroupName') def resource_group_name(self) -> pulumi.Input[str]: '\n \n ' return pulumi.get(self, 'resource_group_name')
@property @pulumi.getter(name='resourceGroupName') def resource_group_name(self) -> pulumi.Input[str]: '\n \n ' return pulumi.get(self, 'resource_group_name')<|docstring|>The resource group name.<|endoftext|>
c87fef2c3f19bb6d64a7995769e7ca9a6baa4ee78a1f529b7f5a17ab06b6b69d
@property
@pulumi.getter(name='jobCollectionName')
def job_collection_name(self) -> Optional[pulumi.Input[str]]:
    """The job collection name."""
    value = pulumi.get(self, 'job_collection_name')
    return value
The job collection name.
sdk/python/pulumi_azure_native/scheduler/job_collection.py
job_collection_name
sebtelko/pulumi-azure-native
0
python
@property @pulumi.getter(name='jobCollectionName') def job_collection_name(self) -> Optional[pulumi.Input[str]]: '\n \n ' return pulumi.get(self, 'job_collection_name')
@property @pulumi.getter(name='jobCollectionName') def job_collection_name(self) -> Optional[pulumi.Input[str]]: '\n \n ' return pulumi.get(self, 'job_collection_name')<|docstring|>The job collection name.<|endoftext|>
08acc3999ff8c739394d91d66b18df53ba07664b45a627b7f377219ffcce8118
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
    """Gets or sets the storage account location."""
    value = pulumi.get(self, 'location')
    return value
Gets or sets the storage account location.
sdk/python/pulumi_azure_native/scheduler/job_collection.py
location
sebtelko/pulumi-azure-native
0
python
@property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: '\n \n ' return pulumi.get(self, 'location')
@property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: '\n \n ' return pulumi.get(self, 'location')<|docstring|>Gets or sets the storage account location.<|endoftext|>
b851faa11fd112870d76004696e23802bcc4723e68048be52e9a89b2d1bcb26e
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
    """Gets or sets the job collection resource name."""
    value = pulumi.get(self, 'name')
    return value
Gets or sets the job collection resource name.
sdk/python/pulumi_azure_native/scheduler/job_collection.py
name
sebtelko/pulumi-azure-native
0
python
@property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: '\n \n ' return pulumi.get(self, 'name')
@property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: '\n \n ' return pulumi.get(self, 'name')<|docstring|>Gets or sets the job collection resource name.<|endoftext|>
02efad6648a670903c6162f469c27e336c324de067a14c105613fae910537af4
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['JobCollectionPropertiesArgs']]:
    """Gets or sets the job collection properties."""
    value = pulumi.get(self, 'properties')
    return value
Gets or sets the job collection properties.
sdk/python/pulumi_azure_native/scheduler/job_collection.py
properties
sebtelko/pulumi-azure-native
0
python
@property @pulumi.getter def properties(self) -> Optional[pulumi.Input['JobCollectionPropertiesArgs']]: '\n \n ' return pulumi.get(self, 'properties')
@property @pulumi.getter def properties(self) -> Optional[pulumi.Input['JobCollectionPropertiesArgs']]: '\n \n ' return pulumi.get(self, 'properties')<|docstring|>Gets or sets the job collection properties.<|endoftext|>
35ae37cb0e2661d0a9ddfaea573573d7d8e245fc6d891b872705d543602eb05e
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]:
    """Gets or sets the tags."""
    value = pulumi.get(self, 'tags')
    return value
Gets or sets the tags.
sdk/python/pulumi_azure_native/scheduler/job_collection.py
tags
sebtelko/pulumi-azure-native
0
python
@property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]: '\n \n ' return pulumi.get(self, 'tags')
@property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]: '\n \n ' return pulumi.get(self, 'tags')<|docstring|>Gets or sets the tags.<|endoftext|>
d16ec4d842a1255cccfb0400596ecf253bcda8b6ee6dcc691247711681ef7c65
@overload
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, job_collection_name: Optional[pulumi.Input[str]]=None, location: Optional[pulumi.Input[str]]=None, name: Optional[pulumi.Input[str]]=None, properties: Optional[pulumi.Input[pulumi.InputType['JobCollectionPropertiesArgs']]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, __props__=None):
    """
    Create a JobCollection resource from individual keyword arguments.
    API Version: 2016-03-01.

    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] job_collection_name: The job collection name.
    :param pulumi.Input[str] location: Gets or sets the storage account location.
    :param pulumi.Input[str] name: Gets or sets the job collection resource name.
    :param pulumi.Input[pulumi.InputType['JobCollectionPropertiesArgs']] properties: Gets or sets the job collection properties.
    :param pulumi.Input[str] resource_group_name: The resource group name.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Gets or sets the tags.
    """
    # Overload stub only - the real implementation dispatches on argument types.
    ...
API Version: 2016-03-01. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] job_collection_name: The job collection name. :param pulumi.Input[str] location: Gets or sets the storage account location. :param pulumi.Input[str] name: Gets or sets the job collection resource name. :param pulumi.Input[pulumi.InputType['JobCollectionPropertiesArgs']] properties: Gets or sets the job collection properties. :param pulumi.Input[str] resource_group_name: The resource group name. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Gets or sets the tags.
sdk/python/pulumi_azure_native/scheduler/job_collection.py
__init__
sebtelko/pulumi-azure-native
0
python
@overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, job_collection_name: Optional[pulumi.Input[str]]=None, location: Optional[pulumi.Input[str]]=None, name: Optional[pulumi.Input[str]]=None, properties: Optional[pulumi.Input[pulumi.InputType['JobCollectionPropertiesArgs']]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, __props__=None): "\n API Version: 2016-03-01.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] job_collection_name: The job collection name.\n :param pulumi.Input[str] location: Gets or sets the storage account location.\n :param pulumi.Input[str] name: Gets or sets the job collection resource name.\n :param pulumi.Input[pulumi.InputType['JobCollectionPropertiesArgs']] properties: Gets or sets the job collection properties.\n :param pulumi.Input[str] resource_group_name: The resource group name.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Gets or sets the tags.\n " ...
@overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, job_collection_name: Optional[pulumi.Input[str]]=None, location: Optional[pulumi.Input[str]]=None, name: Optional[pulumi.Input[str]]=None, properties: Optional[pulumi.Input[pulumi.InputType['JobCollectionPropertiesArgs']]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, __props__=None): "\n API Version: 2016-03-01.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] job_collection_name: The job collection name.\n :param pulumi.Input[str] location: Gets or sets the storage account location.\n :param pulumi.Input[str] name: Gets or sets the job collection resource name.\n :param pulumi.Input[pulumi.InputType['JobCollectionPropertiesArgs']] properties: Gets or sets the job collection properties.\n :param pulumi.Input[str] resource_group_name: The resource group name.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Gets or sets the tags.\n " ...<|docstring|>API Version: 2016-03-01. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] job_collection_name: The job collection name. :param pulumi.Input[str] location: Gets or sets the storage account location. :param pulumi.Input[str] name: Gets or sets the job collection resource name. :param pulumi.Input[pulumi.InputType['JobCollectionPropertiesArgs']] properties: Gets or sets the job collection properties. :param pulumi.Input[str] resource_group_name: The resource group name. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Gets or sets the tags.<|endoftext|>
1b59893c72f0312b6b8931fec27fd0b62ad2dfb2172869686242a2916d66b41d
@overload
def __init__(__self__, resource_name: str, args: JobCollectionArgs, opts: Optional[pulumi.ResourceOptions]=None):
    """
    Create a JobCollection resource from a ``JobCollectionArgs`` bundle.
    API Version: 2016-03-01.

    :param str resource_name: The name of the resource.
    :param JobCollectionArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    # Overload stub only - the real implementation dispatches on argument types.
    ...
API Version: 2016-03-01. :param str resource_name: The name of the resource. :param JobCollectionArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource.
sdk/python/pulumi_azure_native/scheduler/job_collection.py
__init__
sebtelko/pulumi-azure-native
0
python
@overload def __init__(__self__, resource_name: str, args: JobCollectionArgs, opts: Optional[pulumi.ResourceOptions]=None): "\n API Version: 2016-03-01.\n\n :param str resource_name: The name of the resource.\n :param JobCollectionArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " ...
@overload def __init__(__self__, resource_name: str, args: JobCollectionArgs, opts: Optional[pulumi.ResourceOptions]=None): "\n API Version: 2016-03-01.\n\n :param str resource_name: The name of the resource.\n :param JobCollectionArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " ...<|docstring|>API Version: 2016-03-01. :param str resource_name: The name of the resource. :param JobCollectionArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource.<|endoftext|>
cbe3af96164faef0d3d92c3f54a1f323de3bdb342aa2242fa48a6e5694ce1e78
@staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'JobCollection':
    """
    Get an existing JobCollection resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    # Looking up by provider ID: merge the id into the resource options.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    __props__ = JobCollectionArgs.__new__(JobCollectionArgs)
    # Every output property starts out unresolved until the engine fills it in.
    for prop_name in ('location', 'name', 'properties', 'tags', 'type'):
        __props__.__dict__[prop_name] = None
    return JobCollection(resource_name, opts=opts, __props__=__props__)
Get an existing JobCollection resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource.
sdk/python/pulumi_azure_native/scheduler/job_collection.py
get
sebtelko/pulumi-azure-native
0
python
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'JobCollection': "\n Get an existing JobCollection resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = JobCollectionArgs.__new__(JobCollectionArgs) __props__.__dict__['location'] = None __props__.__dict__['name'] = None __props__.__dict__['properties'] = None __props__.__dict__['tags'] = None __props__.__dict__['type'] = None return JobCollection(resource_name, opts=opts, __props__=__props__)
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'JobCollection': "\n Get an existing JobCollection resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = JobCollectionArgs.__new__(JobCollectionArgs) __props__.__dict__['location'] = None __props__.__dict__['name'] = None __props__.__dict__['properties'] = None __props__.__dict__['tags'] = None __props__.__dict__['type'] = None return JobCollection(resource_name, opts=opts, __props__=__props__)<|docstring|>Get an existing JobCollection resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource.<|endoftext|>
09c5fe388ed7ef5ff0640a81376999cabc78093c9707992ccb6cb8b52d7d8a68
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
    """Gets or sets the storage account location."""
    value = pulumi.get(self, 'location')
    return value
Gets or sets the storage account location.
sdk/python/pulumi_azure_native/scheduler/job_collection.py
location
sebtelko/pulumi-azure-native
0
python
@property @pulumi.getter def location(self) -> pulumi.Output[Optional[str]]: '\n \n ' return pulumi.get(self, 'location')
@property @pulumi.getter def location(self) -> pulumi.Output[Optional[str]]: '\n \n ' return pulumi.get(self, 'location')<|docstring|>Gets or sets the storage account location.<|endoftext|>
70db4dc8b18b0417999e4bd7915016c6d1b40116a2b0703b9ac89557f6e7dbe7
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
    """Gets or sets the job collection resource name."""
    value = pulumi.get(self, 'name')
    return value
Gets or sets the job collection resource name.
sdk/python/pulumi_azure_native/scheduler/job_collection.py
name
sebtelko/pulumi-azure-native
0
python
@property @pulumi.getter def name(self) -> pulumi.Output[Optional[str]]: '\n \n ' return pulumi.get(self, 'name')
@property @pulumi.getter def name(self) -> pulumi.Output[Optional[str]]: '\n \n ' return pulumi.get(self, 'name')<|docstring|>Gets or sets the job collection resource name.<|endoftext|>
adf3dbd703da7cc471bf06f4c7b02a481b39aad2d2f65426633b947c73b81dd3
@property @pulumi.getter def properties(self) -> pulumi.Output['outputs.JobCollectionPropertiesResponse']: '\n Gets or sets the job collection properties.\n ' return pulumi.get(self, 'properties')
Gets or sets the job collection properties.
sdk/python/pulumi_azure_native/scheduler/job_collection.py
properties
sebtelko/pulumi-azure-native
0
python
@property @pulumi.getter def properties(self) -> pulumi.Output['outputs.JobCollectionPropertiesResponse']: '\n \n ' return pulumi.get(self, 'properties')
@property @pulumi.getter def properties(self) -> pulumi.Output['outputs.JobCollectionPropertiesResponse']: '\n \n ' return pulumi.get(self, 'properties')<|docstring|>Gets or sets the job collection properties.<|endoftext|>
17d9cc93133ad812ae23a320ad63bd474a6f5add3191cdcb8d0b81c003e25602
@property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[(str, str)]]]: '\n Gets or sets the tags.\n ' return pulumi.get(self, 'tags')
Gets or sets the tags.
sdk/python/pulumi_azure_native/scheduler/job_collection.py
tags
sebtelko/pulumi-azure-native
0
python
@property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[(str, str)]]]: '\n \n ' return pulumi.get(self, 'tags')
@property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[(str, str)]]]: '\n \n ' return pulumi.get(self, 'tags')<|docstring|>Gets or sets the tags.<|endoftext|>
a8a1035bb40007e2a07da45af6060c9d2c9b7136bb3b72474d6a9b151cb0cfc0
@property @pulumi.getter def type(self) -> pulumi.Output[str]: '\n Gets the job collection resource type.\n ' return pulumi.get(self, 'type')
Gets the job collection resource type.
sdk/python/pulumi_azure_native/scheduler/job_collection.py
type
sebtelko/pulumi-azure-native
0
python
@property @pulumi.getter def type(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'type')
@property @pulumi.getter def type(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'type')<|docstring|>Gets the job collection resource type.<|endoftext|>
8c38f35774bad76865dab629c7e43365c3deeb9cab357bc552ee127cad530be1
@pytest.mark.usefixtures('cleandir') def test_fr7(spc_water_box): 'FR7: Python bindings for launching simulations.\n\n gmx.mdrun uses bindings to C++ API to launch simulations.\n ' md = gmx.mdrun(spc_water_box) md.run()
FR7: Python bindings for launching simulations. gmx.mdrun uses bindings to C++ API to launch simulations.
python_packaging/test/test_fr07.py
test_fr7
kassonlab/gmxapi
43
python
@pytest.mark.usefixtures('cleandir') def test_fr7(spc_water_box): 'FR7: Python bindings for launching simulations.\n\n gmx.mdrun uses bindings to C++ API to launch simulations.\n ' md = gmx.mdrun(spc_water_box) md.run()
@pytest.mark.usefixtures('cleandir') def test_fr7(spc_water_box): 'FR7: Python bindings for launching simulations.\n\n gmx.mdrun uses bindings to C++ API to launch simulations.\n ' md = gmx.mdrun(spc_water_box) md.run()<|docstring|>FR7: Python bindings for launching simulations. gmx.mdrun uses bindings to C++ API to launch simulations.<|endoftext|>
8d690e1e90f6a78c9e86fbeb0295b273182f5a3c6a396baa0e8b329f3468346f
def __init__(self, rc, msg): '\n\t Initialises the ReturnCode object.\n\t \n\t :param int rc: The result code, found in constants\n\t :param str msg: The human (maybe) readable message to be included\n\t\t' self.resultcode = rc self.message = msg
Initialises the ReturnCode object. :param int rc: The result code, found in constants :param str msg: The human (maybe) readable message to be included
a10/build/lib/a10/structures/returncode.py
__init__
THS-on/AttestationEngine
7
python
def __init__(self, rc, msg): '\n\t Initialises the ReturnCode object.\n\t \n\t :param int rc: The result code, found in constants\n\t :param str msg: The human (maybe) readable message to be included\n\t\t' self.resultcode = rc self.message = msg
def __init__(self, rc, msg): '\n\t Initialises the ReturnCode object.\n\t \n\t :param int rc: The result code, found in constants\n\t :param str msg: The human (maybe) readable message to be included\n\t\t' self.resultcode = rc self.message = msg<|docstring|>Initialises the ReturnCode object. :param int rc: The result code, found in constants :param str msg: The human (maybe) readable message to be included<|endoftext|>
ff44fcc91cf232ca694982020bcdf834cdd14d534e20b07bf0d4c6b4812924d1
def rc(self): '\n\t\tReturns the result code\n\t\t\n\t\t:return: the result code\n\t\t:rtype: int\n\t\t' return self.resultcode
Returns the result code :return: the result code :rtype: int
a10/build/lib/a10/structures/returncode.py
rc
THS-on/AttestationEngine
7
python
def rc(self): '\n\t\tReturns the result code\n\t\t\n\t\t:return: the result code\n\t\t:rtype: int\n\t\t' return self.resultcode
def rc(self): '\n\t\tReturns the result code\n\t\t\n\t\t:return: the result code\n\t\t:rtype: int\n\t\t' return self.resultcode<|docstring|>Returns the result code :return: the result code :rtype: int<|endoftext|>
c352440c5b8b07e8822770e0eb02e4b368cbc83dc9228e6e4aa30b005ab34cce
def msg(self): '\n\t\tReturns the message\n\t\t\n\t\t:return: the message\n\t\t:rtype: msg\n\t\t' return self.message
Returns the message :return: the message :rtype: msg
a10/build/lib/a10/structures/returncode.py
msg
THS-on/AttestationEngine
7
python
def msg(self): '\n\t\tReturns the message\n\t\t\n\t\t:return: the message\n\t\t:rtype: msg\n\t\t' return self.message
def msg(self): '\n\t\tReturns the message\n\t\t\n\t\t:return: the message\n\t\t:rtype: msg\n\t\t' return self.message<|docstring|>Returns the message :return: the message :rtype: msg<|endoftext|>
66d39246f289a31b1060056cc7b7a7eabaf3ec58b27c562178f4939a273042b4
def getRemoteFileSize(url, proxy=None): '\n 通过content-length头获取远程文件大小\n ' try: request = urllib.request.Request(url) request.get_method = (lambda : 'HEAD') response = urllib.request.urlopen(request) response.read() except urllib.error.HTTPError as e: print(e.code) print(e.read().decode('utf8')) return 0 else: fileSize = dict(response.headers).get('Content-Length', 0) return int(fileSize)
通过content-length头获取远程文件大小
amemv-video-ripper.py
getRemoteFileSize
menglike/amemv-crawler
1
python
def getRemoteFileSize(url, proxy=None): '\n \n ' try: request = urllib.request.Request(url) request.get_method = (lambda : 'HEAD') response = urllib.request.urlopen(request) response.read() except urllib.error.HTTPError as e: print(e.code) print(e.read().decode('utf8')) return 0 else: fileSize = dict(response.headers).get('Content-Length', 0) return int(fileSize)
def getRemoteFileSize(url, proxy=None): '\n \n ' try: request = urllib.request.Request(url) request.get_method = (lambda : 'HEAD') response = urllib.request.urlopen(request) response.read() except urllib.error.HTTPError as e: print(e.code) print(e.read().decode('utf8')) return 0 else: fileSize = dict(response.headers).get('Content-Length', 0) return int(fileSize)<|docstring|>通过content-length头获取远程文件大小<|endoftext|>
a6b91d154d620944d6063ee9a4e59822d806d9fba718534e8f043a064496b2cd
def convertlang(inputfile, outputfile, templates, pot=False, duplicatestyle='msgctxt', encoding='utf-8'): 'reads in stdin using fromfileclass, converts using convertorclass,\n writes to stdout' inputstore = lang.LangStore(inputfile, encoding=encoding) convertor = lang2po(duplicatestyle=duplicatestyle) outputstore = convertor.convertstore(inputstore) if outputstore.isempty(): return 0 outputfile.write(str(outputstore)) return 1
reads in stdin using fromfileclass, converts using convertorclass, writes to stdout
vendor/packages/translate-toolkit/translate/convert/mozlang2po.py
convertlang
aokoye/fjord
0
python
def convertlang(inputfile, outputfile, templates, pot=False, duplicatestyle='msgctxt', encoding='utf-8'): 'reads in stdin using fromfileclass, converts using convertorclass,\n writes to stdout' inputstore = lang.LangStore(inputfile, encoding=encoding) convertor = lang2po(duplicatestyle=duplicatestyle) outputstore = convertor.convertstore(inputstore) if outputstore.isempty(): return 0 outputfile.write(str(outputstore)) return 1
def convertlang(inputfile, outputfile, templates, pot=False, duplicatestyle='msgctxt', encoding='utf-8'): 'reads in stdin using fromfileclass, converts using convertorclass,\n writes to stdout' inputstore = lang.LangStore(inputfile, encoding=encoding) convertor = lang2po(duplicatestyle=duplicatestyle) outputstore = convertor.convertstore(inputstore) if outputstore.isempty(): return 0 outputfile.write(str(outputstore)) return 1<|docstring|>reads in stdin using fromfileclass, converts using convertorclass, writes to stdout<|endoftext|>
7eb2d1cce4c294847fa6f754138033eb76a7071e4ae4af4217140f72bcdfd5b1
def convertstore(self, thelangfile): 'converts a file to .po format' thetargetfile = po.pofile() targetheader = thetargetfile.header() targetheader.addnote(('extracted from %s' % thelangfile.filename), 'developer') for langunit in thelangfile.units: newunit = thetargetfile.addsourceunit(langunit.source) newunit.settarget(langunit.target) newunit.addlocations(langunit.getlocations()) newunit.addnote(langunit.getnotes(), 'developer') thetargetfile.removeduplicates(self.duplicatestyle) return thetargetfile
converts a file to .po format
vendor/packages/translate-toolkit/translate/convert/mozlang2po.py
convertstore
aokoye/fjord
0
python
def convertstore(self, thelangfile): thetargetfile = po.pofile() targetheader = thetargetfile.header() targetheader.addnote(('extracted from %s' % thelangfile.filename), 'developer') for langunit in thelangfile.units: newunit = thetargetfile.addsourceunit(langunit.source) newunit.settarget(langunit.target) newunit.addlocations(langunit.getlocations()) newunit.addnote(langunit.getnotes(), 'developer') thetargetfile.removeduplicates(self.duplicatestyle) return thetargetfile
def convertstore(self, thelangfile): thetargetfile = po.pofile() targetheader = thetargetfile.header() targetheader.addnote(('extracted from %s' % thelangfile.filename), 'developer') for langunit in thelangfile.units: newunit = thetargetfile.addsourceunit(langunit.source) newunit.settarget(langunit.target) newunit.addlocations(langunit.getlocations()) newunit.addnote(langunit.getnotes(), 'developer') thetargetfile.removeduplicates(self.duplicatestyle) return thetargetfile<|docstring|>converts a file to .po format<|endoftext|>
157675dbc49d08831112431f31a448eba155c42db3d9bd3883f12dd94a72c67f
def onHtmlClean(self): 'Executed on button press' self.saveNow((lambda : 0)) modifiers = self.mw.app.queryKeyboardModifiers() shift_and_click = (modifiers == Qt.ShiftModifier) if shift_and_click: self.onFieldUndo() return self._fieldUndo = Nnoe for n in range(len(self.note.fields)): if (not self.note.fields[n]): continue self.note.fields[n] = cleanHtml(self.note.fields[n]) self.note.flush() self.loadNote() self.web.setFocus() self.web.eval(('focusField(%d);' % n))
Executed on button press
html_cleaner/editor.py
onHtmlClean
Arthur-Milchior/html-cleaner
0
python
def onHtmlClean(self): self.saveNow((lambda : 0)) modifiers = self.mw.app.queryKeyboardModifiers() shift_and_click = (modifiers == Qt.ShiftModifier) if shift_and_click: self.onFieldUndo() return self._fieldUndo = Nnoe for n in range(len(self.note.fields)): if (not self.note.fields[n]): continue self.note.fields[n] = cleanHtml(self.note.fields[n]) self.note.flush() self.loadNote() self.web.setFocus() self.web.eval(('focusField(%d);' % n))
def onHtmlClean(self): self.saveNow((lambda : 0)) modifiers = self.mw.app.queryKeyboardModifiers() shift_and_click = (modifiers == Qt.ShiftModifier) if shift_and_click: self.onFieldUndo() return self._fieldUndo = Nnoe for n in range(len(self.note.fields)): if (not self.note.fields[n]): continue self.note.fields[n] = cleanHtml(self.note.fields[n]) self.note.flush() self.loadNote() self.web.setFocus() self.web.eval(('focusField(%d);' % n))<|docstring|>Executed on button press<|endoftext|>
c5c187dd025843a6c717db9c4813ad46a65a80f5f75d1bc91649471e7656e49d
def onFieldUndo(self): 'Executed on undo toggle' if ((not hasattr(self, '_fieldUndo')) or (not self._fieldUndo)): return (n, html) = self._fieldUndo self.note.fields[n] = html self.loadNote() self.web.setFocus() self.web.eval(('focusField(%d);' % n))
Executed on undo toggle
html_cleaner/editor.py
onFieldUndo
Arthur-Milchior/html-cleaner
0
python
def onFieldUndo(self): if ((not hasattr(self, '_fieldUndo')) or (not self._fieldUndo)): return (n, html) = self._fieldUndo self.note.fields[n] = html self.loadNote() self.web.setFocus() self.web.eval(('focusField(%d);' % n))
def onFieldUndo(self): if ((not hasattr(self, '_fieldUndo')) or (not self._fieldUndo)): return (n, html) = self._fieldUndo self.note.fields[n] = html self.loadNote() self.web.setFocus() self.web.eval(('focusField(%d);' % n))<|docstring|>Executed on undo toggle<|endoftext|>
6933a89f4aa05015bbb44444591803482d16db5953acc5d3a5dd6f83f9eb724b
def onSetNote(self, note, hide, focus): 'Reset undo contents' self._fieldUndo = None
Reset undo contents
html_cleaner/editor.py
onSetNote
Arthur-Milchior/html-cleaner
0
python
def onSetNote(self, note, hide, focus): self._fieldUndo = None
def onSetNote(self, note, hide, focus): self._fieldUndo = None<|docstring|>Reset undo contents<|endoftext|>
81803884e551b2011b7a338f102b5c3b6f5846ea140c6bb49d5f71d111465d00
def onHtmlPaste(self): 'Executed on paste hotkey' mime = self.web.mungeClip(mode=QClipboard.Clipboard) html = mime.html() if (not html): return cleaned = cleanHtml(html) self.web.eval(('\n var pasteHTML = function(html) {\n setFormat("inserthtml", html);\n };\n var filterHTML = function(html) {\n // wrap it in <top> as we aren\'t allowed to change top level elements\n var top = $.parseHTML("<ankitop>" + html + "</ankitop>")[0];\n filterNode(top);\n var outHtml = top.innerHTML;\n // get rid of nbsp\n outHtml = outHtml.replace(/&nbsp;/ig, " ");\n return outHtml;\n };\n pasteHTML(%s);\n ' % json.dumps(cleaned)))
Executed on paste hotkey
html_cleaner/editor.py
onHtmlPaste
Arthur-Milchior/html-cleaner
0
python
def onHtmlPaste(self): mime = self.web.mungeClip(mode=QClipboard.Clipboard) html = mime.html() if (not html): return cleaned = cleanHtml(html) self.web.eval(('\n var pasteHTML = function(html) {\n setFormat("inserthtml", html);\n };\n var filterHTML = function(html) {\n // wrap it in <top> as we aren\'t allowed to change top level elements\n var top = $.parseHTML("<ankitop>" + html + "</ankitop>")[0];\n filterNode(top);\n var outHtml = top.innerHTML;\n // get rid of nbsp\n outHtml = outHtml.replace(/&nbsp;/ig, " ");\n return outHtml;\n };\n pasteHTML(%s);\n ' % json.dumps(cleaned)))
def onHtmlPaste(self): mime = self.web.mungeClip(mode=QClipboard.Clipboard) html = mime.html() if (not html): return cleaned = cleanHtml(html) self.web.eval(('\n var pasteHTML = function(html) {\n setFormat("inserthtml", html);\n };\n var filterHTML = function(html) {\n // wrap it in <top> as we aren\'t allowed to change top level elements\n var top = $.parseHTML("<ankitop>" + html + "</ankitop>")[0];\n filterNode(top);\n var outHtml = top.innerHTML;\n // get rid of nbsp\n outHtml = outHtml.replace(/&nbsp;/ig, " ");\n return outHtml;\n };\n pasteHTML(%s);\n ' % json.dumps(cleaned)))<|docstring|>Executed on paste hotkey<|endoftext|>
0d20286cf0839fb292dc12da0c5531964d92bab4d81968828fe9e7ab2888f604
def setupButtons(righttopbtns, editor): 'Add buttons to editor' html_clean_hotkey = getUserOption('html_clean_hotkey') html_paste_hotkey = getUserOption('html_paste_hotkey') righttopbtns.append(editor.addButton(icon='clean_html', cmd='clean_html', func=onHtmlClean, label='cH', tip='Clean HTML ({})'.format(html_clean_hotkey), keys=html_clean_hotkey)) t = QShortcut(QKeySequence(('Shift+' + html_clean_hotkey)), editor.parentWindow) t.activated.connect((lambda : editor.onFieldUndo())) t = QShortcut(QKeySequence(html_paste_hotkey), editor.parentWindow) t.activated.connect((lambda : editor.onHtmlPaste()))
Add buttons to editor
html_cleaner/editor.py
setupButtons
Arthur-Milchior/html-cleaner
0
python
def setupButtons(righttopbtns, editor): html_clean_hotkey = getUserOption('html_clean_hotkey') html_paste_hotkey = getUserOption('html_paste_hotkey') righttopbtns.append(editor.addButton(icon='clean_html', cmd='clean_html', func=onHtmlClean, label='cH', tip='Clean HTML ({})'.format(html_clean_hotkey), keys=html_clean_hotkey)) t = QShortcut(QKeySequence(('Shift+' + html_clean_hotkey)), editor.parentWindow) t.activated.connect((lambda : editor.onFieldUndo())) t = QShortcut(QKeySequence(html_paste_hotkey), editor.parentWindow) t.activated.connect((lambda : editor.onHtmlPaste()))
def setupButtons(righttopbtns, editor): html_clean_hotkey = getUserOption('html_clean_hotkey') html_paste_hotkey = getUserOption('html_paste_hotkey') righttopbtns.append(editor.addButton(icon='clean_html', cmd='clean_html', func=onHtmlClean, label='cH', tip='Clean HTML ({})'.format(html_clean_hotkey), keys=html_clean_hotkey)) t = QShortcut(QKeySequence(('Shift+' + html_clean_hotkey)), editor.parentWindow) t.activated.connect((lambda : editor.onFieldUndo())) t = QShortcut(QKeySequence(html_paste_hotkey), editor.parentWindow) t.activated.connect((lambda : editor.onHtmlPaste()))<|docstring|>Add buttons to editor<|endoftext|>
42ac73ba0bd87a7808ace0ccb2d1624be179518480507e3b0041a6506f0dbf2c
def create_app(config=None): 'Creates the app.' app = Flask('flaskbb') app.config.from_object('flaskbb.configs.default.DefaultConfig') app.config.from_object(config) app.config.from_envvar('FLASKBB_SETTINGS', silent=True) configure_blueprints(app) configure_extensions(app) configure_template_filters(app) configure_context_processors(app) configure_before_handlers(app) configure_errorhandlers(app) configure_logging(app) return app
Creates the app.
flaskbb/app.py
create_app
konstantin1985/forum
0
python
def create_app(config=None): app = Flask('flaskbb') app.config.from_object('flaskbb.configs.default.DefaultConfig') app.config.from_object(config) app.config.from_envvar('FLASKBB_SETTINGS', silent=True) configure_blueprints(app) configure_extensions(app) configure_template_filters(app) configure_context_processors(app) configure_before_handlers(app) configure_errorhandlers(app) configure_logging(app) return app
def create_app(config=None): app = Flask('flaskbb') app.config.from_object('flaskbb.configs.default.DefaultConfig') app.config.from_object(config) app.config.from_envvar('FLASKBB_SETTINGS', silent=True) configure_blueprints(app) configure_extensions(app) configure_template_filters(app) configure_context_processors(app) configure_before_handlers(app) configure_errorhandlers(app) configure_logging(app) return app<|docstring|>Creates the app.<|endoftext|>
b0f07948a430817a797997cdf7973e1a66c396a6811dd7539f1453cf02e4c3cd
def configure_extensions(app): 'Configures the extensions.' csrf.init_app(app) plugin_manager.init_app(app) db.init_app(app) migrate.init_app(app, db) mail.init_app(app) cache.init_app(app) debugtoolbar.init_app(app) themes.init_themes(app, app_identifier='flaskbb') redis_store.init_app(app) with app.app_context(): whoosh_index(app, Post) whoosh_index(app, Topic) whoosh_index(app, Forum) whoosh_index(app, Category) whoosh_index(app, User) login_manager.login_view = app.config['LOGIN_VIEW'] login_manager.refresh_view = app.config['REAUTH_VIEW'] login_manager.login_message_category = app.config['LOGIN_MESSAGE_CATEGORY'] login_manager.needs_refresh_message_category = app.config['REFRESH_MESSAGE_CATEGORY'] login_manager.anonymous_user = Guest @login_manager.user_loader def load_user(user_id): 'Loads the user. Required by the `login` extension.' user_instance = User.query.filter_by(id=user_id).first() if user_instance: return user_instance else: return None login_manager.init_app(app) babel.init_app(app=app, default_domain=FlaskBBDomain(app)) @babel.localeselector def get_locale(): if (current_user.is_authenticated and current_user.language): return current_user.language return 'ru' allows.init_app(app) allows.identity_loader((lambda : current_user))
Configures the extensions.
flaskbb/app.py
configure_extensions
konstantin1985/forum
0
python
def configure_extensions(app): csrf.init_app(app) plugin_manager.init_app(app) db.init_app(app) migrate.init_app(app, db) mail.init_app(app) cache.init_app(app) debugtoolbar.init_app(app) themes.init_themes(app, app_identifier='flaskbb') redis_store.init_app(app) with app.app_context(): whoosh_index(app, Post) whoosh_index(app, Topic) whoosh_index(app, Forum) whoosh_index(app, Category) whoosh_index(app, User) login_manager.login_view = app.config['LOGIN_VIEW'] login_manager.refresh_view = app.config['REAUTH_VIEW'] login_manager.login_message_category = app.config['LOGIN_MESSAGE_CATEGORY'] login_manager.needs_refresh_message_category = app.config['REFRESH_MESSAGE_CATEGORY'] login_manager.anonymous_user = Guest @login_manager.user_loader def load_user(user_id): 'Loads the user. Required by the `login` extension.' user_instance = User.query.filter_by(id=user_id).first() if user_instance: return user_instance else: return None login_manager.init_app(app) babel.init_app(app=app, default_domain=FlaskBBDomain(app)) @babel.localeselector def get_locale(): if (current_user.is_authenticated and current_user.language): return current_user.language return 'ru' allows.init_app(app) allows.identity_loader((lambda : current_user))
def configure_extensions(app): csrf.init_app(app) plugin_manager.init_app(app) db.init_app(app) migrate.init_app(app, db) mail.init_app(app) cache.init_app(app) debugtoolbar.init_app(app) themes.init_themes(app, app_identifier='flaskbb') redis_store.init_app(app) with app.app_context(): whoosh_index(app, Post) whoosh_index(app, Topic) whoosh_index(app, Forum) whoosh_index(app, Category) whoosh_index(app, User) login_manager.login_view = app.config['LOGIN_VIEW'] login_manager.refresh_view = app.config['REAUTH_VIEW'] login_manager.login_message_category = app.config['LOGIN_MESSAGE_CATEGORY'] login_manager.needs_refresh_message_category = app.config['REFRESH_MESSAGE_CATEGORY'] login_manager.anonymous_user = Guest @login_manager.user_loader def load_user(user_id): 'Loads the user. Required by the `login` extension.' user_instance = User.query.filter_by(id=user_id).first() if user_instance: return user_instance else: return None login_manager.init_app(app) babel.init_app(app=app, default_domain=FlaskBBDomain(app)) @babel.localeselector def get_locale(): if (current_user.is_authenticated and current_user.language): return current_user.language return 'ru' allows.init_app(app) allows.identity_loader((lambda : current_user))<|docstring|>Configures the extensions.<|endoftext|>
f4e5158d1151d60e616e96053c998704920936cbc404f9e5231f9d1a020695a7
def configure_template_filters(app): 'Configures the template filters.' filters = {} filters['markup'] = render_markup filters['format_date'] = format_date filters['time_since'] = time_since filters['is_online'] = is_online filters['crop_title'] = crop_title filters['forum_is_unread'] = forum_is_unread filters['topic_is_unread'] = topic_is_unread permissions = [('is_admin', IsAdmin), ('is_moderator', IsAtleastModerator), ('is_admin_or_moderator', IsAtleastModerator), ('can_edit_user', CanEditUser), ('can_ban_user', CanBanUser)] filters.update([(name, partial(perm, request=request)) for (name, perm) in permissions]) filters['can_moderate'] = TplCanModerate(request) filters['post_reply'] = TplCanPostReply(request) filters['edit_post'] = TplCanEditPost(request) filters['delete_post'] = TplCanDeletePost(request) filters['post_topic'] = TplCanPostTopic(request) filters['delete_topic'] = TplCanDeleteTopic(request) app.jinja_env.filters.update(filters)
Configures the template filters.
flaskbb/app.py
configure_template_filters
konstantin1985/forum
0
python
def configure_template_filters(app): filters = {} filters['markup'] = render_markup filters['format_date'] = format_date filters['time_since'] = time_since filters['is_online'] = is_online filters['crop_title'] = crop_title filters['forum_is_unread'] = forum_is_unread filters['topic_is_unread'] = topic_is_unread permissions = [('is_admin', IsAdmin), ('is_moderator', IsAtleastModerator), ('is_admin_or_moderator', IsAtleastModerator), ('can_edit_user', CanEditUser), ('can_ban_user', CanBanUser)] filters.update([(name, partial(perm, request=request)) for (name, perm) in permissions]) filters['can_moderate'] = TplCanModerate(request) filters['post_reply'] = TplCanPostReply(request) filters['edit_post'] = TplCanEditPost(request) filters['delete_post'] = TplCanDeletePost(request) filters['post_topic'] = TplCanPostTopic(request) filters['delete_topic'] = TplCanDeleteTopic(request) app.jinja_env.filters.update(filters)
def configure_template_filters(app): filters = {} filters['markup'] = render_markup filters['format_date'] = format_date filters['time_since'] = time_since filters['is_online'] = is_online filters['crop_title'] = crop_title filters['forum_is_unread'] = forum_is_unread filters['topic_is_unread'] = topic_is_unread permissions = [('is_admin', IsAdmin), ('is_moderator', IsAtleastModerator), ('is_admin_or_moderator', IsAtleastModerator), ('can_edit_user', CanEditUser), ('can_ban_user', CanBanUser)] filters.update([(name, partial(perm, request=request)) for (name, perm) in permissions]) filters['can_moderate'] = TplCanModerate(request) filters['post_reply'] = TplCanPostReply(request) filters['edit_post'] = TplCanEditPost(request) filters['delete_post'] = TplCanDeletePost(request) filters['post_topic'] = TplCanPostTopic(request) filters['delete_topic'] = TplCanDeleteTopic(request) app.jinja_env.filters.update(filters)<|docstring|>Configures the template filters.<|endoftext|>
d97cf49e7347f13f1d04a4575fa64bf5fb0770285ea942c9713e10e2567c34b2
def configure_context_processors(app): 'Configures the context processors.' @app.context_processor def inject_flaskbb_config(): 'Injects the ``flaskbb_config`` config variable into the\n templates.\n ' return dict(flaskbb_config=flaskbb_config)
Configures the context processors.
flaskbb/app.py
configure_context_processors
konstantin1985/forum
0
python
def configure_context_processors(app): @app.context_processor def inject_flaskbb_config(): 'Injects the ``flaskbb_config`` config variable into the\n templates.\n ' return dict(flaskbb_config=flaskbb_config)
def configure_context_processors(app): @app.context_processor def inject_flaskbb_config(): 'Injects the ``flaskbb_config`` config variable into the\n templates.\n ' return dict(flaskbb_config=flaskbb_config)<|docstring|>Configures the context processors.<|endoftext|>
cb460315d285ae540522606cbaf77a69e169071975dda10697cded27b2007e15
def configure_before_handlers(app): 'Configures the before request handlers.' @app.before_request def update_lastseen(): 'Updates `lastseen` before every reguest if the user is\n authenticated.' if current_user.is_authenticated: current_user.lastseen = datetime.datetime.utcnow() db.session.add(current_user) db.session.commit() if app.config['REDIS_ENABLED']: @app.before_request def mark_current_user_online(): if current_user.is_authenticated: mark_online(current_user.username) else: mark_online(request.remote_addr, guest=True)
Configures the before request handlers.
flaskbb/app.py
configure_before_handlers
konstantin1985/forum
0
python
def configure_before_handlers(app): @app.before_request def update_lastseen(): 'Updates `lastseen` before every reguest if the user is\n authenticated.' if current_user.is_authenticated: current_user.lastseen = datetime.datetime.utcnow() db.session.add(current_user) db.session.commit() if app.config['REDIS_ENABLED']: @app.before_request def mark_current_user_online(): if current_user.is_authenticated: mark_online(current_user.username) else: mark_online(request.remote_addr, guest=True)
def configure_before_handlers(app): @app.before_request def update_lastseen(): 'Updates `lastseen` before every reguest if the user is\n authenticated.' if current_user.is_authenticated: current_user.lastseen = datetime.datetime.utcnow() db.session.add(current_user) db.session.commit() if app.config['REDIS_ENABLED']: @app.before_request def mark_current_user_online(): if current_user.is_authenticated: mark_online(current_user.username) else: mark_online(request.remote_addr, guest=True)<|docstring|>Configures the before request handlers.<|endoftext|>
2d91ba832ff3e3e062fd859f6f39f6f906fe699b5ac68f968fee741b794c6aef
def configure_errorhandlers(app): 'Configures the error handlers.' @app.errorhandler(403) def forbidden_page(error): return (render_template('errors/forbidden_page.html'), 403) @app.errorhandler(404) def page_not_found(error): return (render_template('errors/page_not_found.html'), 404) @app.errorhandler(500) def server_error_page(error): return (render_template('errors/server_error.html'), 500)
Configures the error handlers.
flaskbb/app.py
configure_errorhandlers
konstantin1985/forum
0
python
def configure_errorhandlers(app): @app.errorhandler(403) def forbidden_page(error): return (render_template('errors/forbidden_page.html'), 403) @app.errorhandler(404) def page_not_found(error): return (render_template('errors/page_not_found.html'), 404) @app.errorhandler(500) def server_error_page(error): return (render_template('errors/server_error.html'), 500)
def configure_errorhandlers(app): @app.errorhandler(403) def forbidden_page(error): return (render_template('errors/forbidden_page.html'), 403) @app.errorhandler(404) def page_not_found(error): return (render_template('errors/page_not_found.html'), 404) @app.errorhandler(500) def server_error_page(error): return (render_template('errors/server_error.html'), 500)<|docstring|>Configures the error handlers.<|endoftext|>
7be2c7a2d7c74832bfe3b4e22265563ffcd7dcf93620b4305343d83e60c14e87
def configure_logging(app): 'Configures logging.' logs_folder = os.path.join(app.root_path, os.pardir, 'logs') from logging.handlers import SMTPHandler formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]') info_log = os.path.join(logs_folder, app.config['INFO_LOG']) info_file_handler = logging.handlers.RotatingFileHandler(info_log, maxBytes=100000, backupCount=10) info_file_handler.setLevel(logging.INFO) info_file_handler.setFormatter(formatter) app.logger.addHandler(info_file_handler) error_log = os.path.join(logs_folder, app.config['ERROR_LOG']) error_file_handler = logging.handlers.RotatingFileHandler(error_log, maxBytes=100000, backupCount=10) error_file_handler.setLevel(logging.ERROR) error_file_handler.setFormatter(formatter) app.logger.addHandler(error_file_handler) if app.config['SEND_LOGS']: mail_handler = SMTPHandler(app.config['MAIL_SERVER'], app.config['MAIL_DEFAULT_SENDER'], app.config['ADMINS'], 'application error, no admins specified', (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])) mail_handler.setLevel(logging.ERROR) mail_handler.setFormatter(formatter) app.logger.addHandler(mail_handler) if app.config['SQLALCHEMY_ECHO']: @event.listens_for(Engine, 'before_cursor_execute') def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): conn.info.setdefault('query_start_time', []).append(time.time()) @event.listens_for(Engine, 'after_cursor_execute') def after_cursor_execute(conn, cursor, statement, parameters, context, executemany): total = (time.time() - conn.info['query_start_time'].pop((- 1))) app.logger.debug('Total Time: %f', total)
Configures logging.
flaskbb/app.py
configure_logging
konstantin1985/forum
0
python
def configure_logging(app): logs_folder = os.path.join(app.root_path, os.pardir, 'logs') from logging.handlers import SMTPHandler formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]') info_log = os.path.join(logs_folder, app.config['INFO_LOG']) info_file_handler = logging.handlers.RotatingFileHandler(info_log, maxBytes=100000, backupCount=10) info_file_handler.setLevel(logging.INFO) info_file_handler.setFormatter(formatter) app.logger.addHandler(info_file_handler) error_log = os.path.join(logs_folder, app.config['ERROR_LOG']) error_file_handler = logging.handlers.RotatingFileHandler(error_log, maxBytes=100000, backupCount=10) error_file_handler.setLevel(logging.ERROR) error_file_handler.setFormatter(formatter) app.logger.addHandler(error_file_handler) if app.config['SEND_LOGS']: mail_handler = SMTPHandler(app.config['MAIL_SERVER'], app.config['MAIL_DEFAULT_SENDER'], app.config['ADMINS'], 'application error, no admins specified', (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])) mail_handler.setLevel(logging.ERROR) mail_handler.setFormatter(formatter) app.logger.addHandler(mail_handler) if app.config['SQLALCHEMY_ECHO']: @event.listens_for(Engine, 'before_cursor_execute') def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): conn.info.setdefault('query_start_time', []).append(time.time()) @event.listens_for(Engine, 'after_cursor_execute') def after_cursor_execute(conn, cursor, statement, parameters, context, executemany): total = (time.time() - conn.info['query_start_time'].pop((- 1))) app.logger.debug('Total Time: %f', total)
def configure_logging(app): logs_folder = os.path.join(app.root_path, os.pardir, 'logs') from logging.handlers import SMTPHandler formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]') info_log = os.path.join(logs_folder, app.config['INFO_LOG']) info_file_handler = logging.handlers.RotatingFileHandler(info_log, maxBytes=100000, backupCount=10) info_file_handler.setLevel(logging.INFO) info_file_handler.setFormatter(formatter) app.logger.addHandler(info_file_handler) error_log = os.path.join(logs_folder, app.config['ERROR_LOG']) error_file_handler = logging.handlers.RotatingFileHandler(error_log, maxBytes=100000, backupCount=10) error_file_handler.setLevel(logging.ERROR) error_file_handler.setFormatter(formatter) app.logger.addHandler(error_file_handler) if app.config['SEND_LOGS']: mail_handler = SMTPHandler(app.config['MAIL_SERVER'], app.config['MAIL_DEFAULT_SENDER'], app.config['ADMINS'], 'application error, no admins specified', (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])) mail_handler.setLevel(logging.ERROR) mail_handler.setFormatter(formatter) app.logger.addHandler(mail_handler) if app.config['SQLALCHEMY_ECHO']: @event.listens_for(Engine, 'before_cursor_execute') def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): conn.info.setdefault('query_start_time', []).append(time.time()) @event.listens_for(Engine, 'after_cursor_execute') def after_cursor_execute(conn, cursor, statement, parameters, context, executemany): total = (time.time() - conn.info['query_start_time'].pop((- 1))) app.logger.debug('Total Time: %f', total)<|docstring|>Configures logging.<|endoftext|>
232f14cbd00fc67c98cbed2f4cc5a5d1df4ce18731308d3a617b52c02882bc30
@login_manager.user_loader
def load_user(user_id):
    """Loads the user. Required by the `login` extension.

    Returns the ``User`` with the given id, or ``None`` when no such
    user exists. ``Query.first()`` already yields ``None`` on a miss,
    so the previous explicit if/else branch was redundant.
    """
    return User.query.filter_by(id=user_id).first()
Loads the user. Required by the `login` extension.
flaskbb/app.py
load_user
konstantin1985/forum
0
python
@login_manager.user_loader def load_user(user_id): user_instance = User.query.filter_by(id=user_id).first() if user_instance: return user_instance else: return None
@login_manager.user_loader def load_user(user_id): user_instance = User.query.filter_by(id=user_id).first() if user_instance: return user_instance else: return None<|docstring|>Loads the user. Required by the `login` extension.<|endoftext|>
a8ccbc3eebdf79c871a8cd1f66133ee8fdf35e6d4c464ae2983e85f60df83433
@app.context_processor
def inject_flaskbb_config():
    """Makes the ``flaskbb_config`` configuration object available
    inside all templates.
    """
    return {'flaskbb_config': flaskbb_config}
Injects the ``flaskbb_config`` config variable into the templates.
flaskbb/app.py
inject_flaskbb_config
konstantin1985/forum
0
python
@app.context_processor def inject_flaskbb_config(): 'Injects the ``flaskbb_config`` config variable into the\n templates.\n ' return dict(flaskbb_config=flaskbb_config)
@app.context_processor def inject_flaskbb_config(): 'Injects the ``flaskbb_config`` config variable into the\n templates.\n ' return dict(flaskbb_config=flaskbb_config)<|docstring|>Injects the ``flaskbb_config`` config variable into the templates.<|endoftext|>
43ef852061fce480c4cdbc70f8cfe4c28a7853522a738c81659575204f182296
@app.before_request
def update_lastseen():
    """Refreshes the `lastseen` timestamp before every request for
    authenticated users."""
    if not current_user.is_authenticated:
        return
    current_user.lastseen = datetime.datetime.utcnow()
    db.session.add(current_user)
    db.session.commit()
Updates `lastseen` before every reguest if the user is authenticated.
flaskbb/app.py
update_lastseen
konstantin1985/forum
0
python
@app.before_request def update_lastseen(): 'Updates `lastseen` before every reguest if the user is\n authenticated.' if current_user.is_authenticated: current_user.lastseen = datetime.datetime.utcnow() db.session.add(current_user) db.session.commit()
@app.before_request def update_lastseen(): 'Updates `lastseen` before every reguest if the user is\n authenticated.' if current_user.is_authenticated: current_user.lastseen = datetime.datetime.utcnow() db.session.add(current_user) db.session.commit()<|docstring|>Updates `lastseen` before every reguest if the user is authenticated.<|endoftext|>
899fadf79a231f53b4d65955322d0f6f80b01023564aada1ec98666ad4a38941
def __init__(self, root, fileids=DOC_PATTERN, encoding='utf8', tags=TAGS, **kwargs):
    """
    Initialize the corpus reader. Categorization arguments
    (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to
    the ``CategorizedCorpusReader`` constructor. The remaining
    arguments are passed to the ``CorpusReader`` constructor.
    """
    # Fall back to the default category pattern when the caller did
    # not supply any categorization argument.
    if (not any((key.startswith('cat_') for key in kwargs.keys()))):
        kwargs['cat_pattern'] = CAT_PATTERN
    CategorizedCorpusReader.__init__(self, kwargs)
    CorpusReader.__init__(self, root, fileids, encoding)
    # HTML tags whose text content paras() will extract as paragraphs.
    self.tags = tags
Initialize the corpus reader. Categorization arguments (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to the ``CategorizedCorpusReader`` constructor. The remaining arguments are passed to the ``CorpusReader`` constructor.
snippets/ch03/reader.py
__init__
bkudaimi/atap
360
python
def __init__(self, root, fileids=DOC_PATTERN, encoding='utf8', tags=TAGS, **kwargs): '\n Initialize the corpus reader. Categorization arguments\n (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to\n the ``CategorizedCorpusReader`` constructor. The remaining\n arguments are passed to the ``CorpusReader`` constructor.\n ' if (not any((key.startswith('cat_') for key in kwargs.keys()))): kwargs['cat_pattern'] = CAT_PATTERN CategorizedCorpusReader.__init__(self, kwargs) CorpusReader.__init__(self, root, fileids, encoding) self.tags = tags
def __init__(self, root, fileids=DOC_PATTERN, encoding='utf8', tags=TAGS, **kwargs): '\n Initialize the corpus reader. Categorization arguments\n (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to\n the ``CategorizedCorpusReader`` constructor. The remaining\n arguments are passed to the ``CorpusReader`` constructor.\n ' if (not any((key.startswith('cat_') for key in kwargs.keys()))): kwargs['cat_pattern'] = CAT_PATTERN CategorizedCorpusReader.__init__(self, kwargs) CorpusReader.__init__(self, root, fileids, encoding) self.tags = tags<|docstring|>Initialize the corpus reader. Categorization arguments (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to the ``CategorizedCorpusReader`` constructor. The remaining arguments are passed to the ``CorpusReader`` constructor.<|endoftext|>
07ced3e5302fb932efc7cc1cd063ba58a9bbc8f5671ca9ff4d2ffe0af4629ac7
def resolve(self, fileids, categories):
    """
    Returns a list of fileids or categories depending on what is passed
    to each internal corpus reader function. Implemented similarly to
    the NLTK ``CategorizedPlaintextCorpusReader``.
    """
    if fileids is not None and categories is not None:
        raise ValueError('Specify fileids or categories, not both')
    if categories is None:
        return fileids
    return self.fileids(categories)
Returns a list of fileids or categories depending on what is passed to each internal corpus reader function. Implemented similarly to the NLTK ``CategorizedPlaintextCorpusReader``.
snippets/ch03/reader.py
resolve
bkudaimi/atap
360
python
def resolve(self, fileids, categories): '\n Returns a list of fileids or categories depending on what is passed\n to each internal corpus reader function. Implemented similarly to\n the NLTK ``CategorizedPlaintextCorpusReader``.\n ' if ((fileids is not None) and (categories is not None)): raise ValueError('Specify fileids or categories, not both') if (categories is not None): return self.fileids(categories) return fileids
def resolve(self, fileids, categories): '\n Returns a list of fileids or categories depending on what is passed\n to each internal corpus reader function. Implemented similarly to\n the NLTK ``CategorizedPlaintextCorpusReader``.\n ' if ((fileids is not None) and (categories is not None)): raise ValueError('Specify fileids or categories, not both') if (categories is not None): return self.fileids(categories) return fileids<|docstring|>Returns a list of fileids or categories depending on what is passed to each internal corpus reader function. Implemented similarly to the NLTK ``CategorizedPlaintextCorpusReader``.<|endoftext|>
b75629426db4ca4f27388948e077c110b454a342f23301d4859ac01562fc3fd0
def docs(self, fileids=None, categories=None):
    """
    Yields the complete text of each HTML document, one at a time,
    closing every file after reading so iteration stays memory safe.
    """
    selected = self.resolve(fileids, categories)
    for path, encoding in self.abspaths(selected, include_encoding=True):
        with codecs.open(path, 'r', encoding=encoding) as handle:
            yield handle.read()
Returns the complete text of an HTML document, closing the document after we are done reading it and yielding it in a memory safe fashion.
snippets/ch03/reader.py
docs
bkudaimi/atap
360
python
def docs(self, fileids=None, categories=None): '\n Returns the complete text of an HTML document, closing the document\n after we are done reading it and yielding it in a memory safe fashion.\n ' fileids = self.resolve(fileids, categories) for (path, encoding) in self.abspaths(fileids, include_encoding=True): with codecs.open(path, 'r', encoding=encoding) as f: (yield f.read())
def docs(self, fileids=None, categories=None): '\n Returns the complete text of an HTML document, closing the document\n after we are done reading it and yielding it in a memory safe fashion.\n ' fileids = self.resolve(fileids, categories) for (path, encoding) in self.abspaths(fileids, include_encoding=True): with codecs.open(path, 'r', encoding=encoding) as f: (yield f.read())<|docstring|>Returns the complete text of an HTML document, closing the document after we are done reading it and yielding it in a memory safe fashion.<|endoftext|>
0ed160c4e5e17294a01bc68c6e9bf71a73ac8dfd677bfc1a21ebffabc65aa339
def sizes(self, fileids=None, categories=None):
    """
    Yields the size on disk, in bytes, of each file in the corpus.
    This function is used to detect oddly large files in the corpus.

    (The previous docstring claimed (fileid, size) tuples were
    returned; the generator yields only the sizes.)
    """
    fileids = self.resolve(fileids, categories)
    for path in self.abspaths(fileids):
        yield os.path.getsize(path)
Returns a list of tuples, the fileid and size on disk of the file. This function is used to detect oddly large files in the corpus.
snippets/ch03/reader.py
sizes
bkudaimi/atap
360
python
def sizes(self, fileids=None, categories=None): '\n Returns a list of tuples, the fileid and size on disk of the file.\n This function is used to detect oddly large files in the corpus.\n ' fileids = self.resolve(fileids, categories) for path in self.abspaths(fileids): (yield os.path.getsize(path))
def sizes(self, fileids=None, categories=None): '\n Returns a list of tuples, the fileid and size on disk of the file.\n This function is used to detect oddly large files in the corpus.\n ' fileids = self.resolve(fileids, categories) for path in self.abspaths(fileids): (yield os.path.getsize(path))<|docstring|>Returns a list of tuples, the fileid and size on disk of the file. This function is used to detect oddly large files in the corpus.<|endoftext|>
48f7f9c7e3165c601ed8e29b993fe01a61fd95293173f4197ac28d5757dcb844
def html(self, fileids=None, categories=None):
    """
    Returns the HTML content of each document, cleaning it using
    the readability-lxml library.
    """
    for doc in self.docs(fileids, categories):
        try:
            # Paper(doc).summary() extracts the readable article body;
            # presumably Paper wraps readability's Document — verify.
            (yield Paper(doc).summary())
        except Unparseable as e:
            # Skip documents readability cannot parse instead of
            # aborting the whole generator.
            print('Could not parse HTML: {}'.format(e))
            continue
Returns the HTML content of each document, cleaning it using the readability-lxml library.
snippets/ch03/reader.py
html
bkudaimi/atap
360
python
def html(self, fileids=None, categories=None): '\n Returns the HTML content of each document, cleaning it using\n the readability-lxml library.\n ' for doc in self.docs(fileids, categories): try: (yield Paper(doc).summary()) except Unparseable as e: print('Could not parse HTML: {}'.format(e)) continue
def html(self, fileids=None, categories=None): '\n Returns the HTML content of each document, cleaning it using\n the readability-lxml library.\n ' for doc in self.docs(fileids, categories): try: (yield Paper(doc).summary()) except Unparseable as e: print('Could not parse HTML: {}'.format(e)) continue<|docstring|>Returns the HTML content of each document, cleaning it using the readability-lxml library.<|endoftext|>
4223b2f857f0ee023724ffdd1a3c1c7c1496f0e932400c0c86469c3cfeb774f3
def paras(self, fileids=None, categories=None):
    """
    Uses BeautifulSoup to parse the paragraphs from the HTML.

    Yields the text of every element matching ``self.tags``.
    (Restores the docstring that had been blanked out.)
    """
    for html in self.html(fileids, categories):
        soup = bs4.BeautifulSoup(html, 'lxml')
        for element in soup.find_all(self.tags):
            yield element.text
        # Free the parse tree promptly; corpora can be large.
        soup.decompose()
Uses BeautifulSoup to parse the paragraphs from the HTML.
snippets/ch03/reader.py
paras
bkudaimi/atap
360
python
def paras(self, fileids=None, categories=None): '\n \n ' for html in self.html(fileids, categories): soup = bs4.BeautifulSoup(html, 'lxml') for element in soup.find_all(self.tags): (yield element.text) soup.decompose()
def paras(self, fileids=None, categories=None): '\n \n ' for html in self.html(fileids, categories): soup = bs4.BeautifulSoup(html, 'lxml') for element in soup.find_all(self.tags): (yield element.text) soup.decompose()<|docstring|>Uses BeautifulSoup to parse the paragraphs from the HTML.<|endoftext|>
18522f78d22fd5c32c0bcc2063deead18ece863376b3d0ef5f581b5b9e900497
def sents(self, fileids=None, categories=None):
    """
    Uses the built in sentence tokenizer to extract sentences from the
    paragraphs. Note that this method uses BeautifulSoup to parse HTML.
    """
    for para in self.paras(fileids, categories):
        yield from sent_tokenize(para)
Uses the built in sentence tokenizer to extract sentences from the paragraphs. Note that this method uses BeautifulSoup to parse HTML.
snippets/ch03/reader.py
sents
bkudaimi/atap
360
python
def sents(self, fileids=None, categories=None): '\n Uses the built in sentence tokenizer to extract sentences from the\n paragraphs. Note that this method uses BeautifulSoup to parse HTML.\n ' for paragraph in self.paras(fileids, categories): for sentence in sent_tokenize(paragraph): (yield sentence)
def sents(self, fileids=None, categories=None): '\n Uses the built in sentence tokenizer to extract sentences from the\n paragraphs. Note that this method uses BeautifulSoup to parse HTML.\n ' for paragraph in self.paras(fileids, categories): for sentence in sent_tokenize(paragraph): (yield sentence)<|docstring|>Uses the built in sentence tokenizer to extract sentences from the paragraphs. Note that this method uses BeautifulSoup to parse HTML.<|endoftext|>
91e200a7b5e78fc0a357cdb385d572a355af37733b48ca484220f115558c5ecc
def words(self, fileids=None, categories=None):
    """
    Uses the built in word tokenizer to extract tokens from sentences.
    Note that this method uses BeautifulSoup to parse HTML content.
    """
    for sent in self.sents(fileids, categories):
        yield from wordpunct_tokenize(sent)
Uses the built in word tokenizer to extract tokens from sentences. Note that this method uses BeautifulSoup to parse HTML content.
snippets/ch03/reader.py
words
bkudaimi/atap
360
python
def words(self, fileids=None, categories=None): '\n Uses the built in word tokenizer to extract tokens from sentences.\n Note that this method uses BeautifulSoup to parse HTML content.\n ' for sentence in self.sents(fileids, categories): for token in wordpunct_tokenize(sentence): (yield token)
def words(self, fileids=None, categories=None): '\n Uses the built in word tokenizer to extract tokens from sentences.\n Note that this method uses BeautifulSoup to parse HTML content.\n ' for sentence in self.sents(fileids, categories): for token in wordpunct_tokenize(sentence): (yield token)<|docstring|>Uses the built in word tokenizer to extract tokens from sentences. Note that this method uses BeautifulSoup to parse HTML content.<|endoftext|>
4d4c8c310bda7cec4cdf37b54e8c479551840d876efb6b580fc9fe6bce09e49e
def tokenize(self, fileids=None, categories=None):
    """
    Segments, tokenizes, and tags a document in the corpus.

    Yields, per paragraph, a list of sentences where each sentence is
    a list of (token, tag) tuples. (Restores the blanked docstring.)
    """
    for paragraph in self.paras(fileids, categories):
        # Sentence-split each paragraph, then word-tokenize and
        # POS-tag every sentence.
        yield [pos_tag(wordpunct_tokenize(sent)) for sent in sent_tokenize(paragraph)]
Segments, tokenizes, and tags a document in the corpus.
snippets/ch03/reader.py
tokenize
bkudaimi/atap
360
python
def tokenize(self, fileids=None, categories=None): '\n \n ' for paragraph in self.paras(fileids, categories): (yield [pos_tag(wordpunct_tokenize(sent)) for sent in sent_tokenize(paragraph)])
def tokenize(self, fileids=None, categories=None): '\n \n ' for paragraph in self.paras(fileids, categories): (yield [pos_tag(wordpunct_tokenize(sent)) for sent in sent_tokenize(paragraph)])<|docstring|>Segments, tokenizes, and tags a document in the corpus.<|endoftext|>
0e410efeaf277e95e67f6766613be4a172e65165f7d793e744e0bf4345d6d5d2
def describe(self, fileids=None, categories=None):
    """
    Performs a single pass of the corpus and
    returns a dictionary with a variety of metrics
    concerning the state of the corpus.
    """
    started = time.time()
    # Structural counts (paras/sents/words) and the token vocabulary.
    counts = nltk.FreqDist()
    tokens = nltk.FreqDist()
    # Single pass: tally every paragraph, sentence and token.
    for para in self.paras(fileids, categories):
        counts['paras'] += 1
        for sent in sent_tokenize(para):
            counts['sents'] += 1
            for word in wordpunct_tokenize(sent):
                counts['words'] += 1
                tokens[word] += 1
    # Number of files and categories covered by this selection
    # (falls back to the whole corpus when nothing was selected).
    n_fileids = len((self.resolve(fileids, categories) or self.fileids()))
    n_topics = len(self.categories(self.resolve(fileids, categories)))
    # lexdiv: lexical diversity; ppdoc: paragraphs per document;
    # sppar: sentences per paragraph; secs: wall-clock elapsed time.
    return {'files': n_fileids, 'topics': n_topics, 'paras': counts['paras'], 'sents': counts['sents'], 'words': counts['words'], 'vocab': len(tokens), 'lexdiv': (float(counts['words']) / float(len(tokens))), 'ppdoc': (float(counts['paras']) / float(n_fileids)), 'sppar': (float(counts['sents']) / float(counts['paras'])), 'secs': (time.time() - started)}
Performs a single pass of the corpus and returns a dictionary with a variety of metrics concerning the state of the corpus.
snippets/ch03/reader.py
describe
bkudaimi/atap
360
python
def describe(self, fileids=None, categories=None): '\n Performs a single pass of the corpus and\n returns a dictionary with a variety of metrics\n concerning the state of the corpus.\n ' started = time.time() counts = nltk.FreqDist() tokens = nltk.FreqDist() for para in self.paras(fileids, categories): counts['paras'] += 1 for sent in sent_tokenize(para): counts['sents'] += 1 for word in wordpunct_tokenize(sent): counts['words'] += 1 tokens[word] += 1 n_fileids = len((self.resolve(fileids, categories) or self.fileids())) n_topics = len(self.categories(self.resolve(fileids, categories))) return {'files': n_fileids, 'topics': n_topics, 'paras': counts['paras'], 'sents': counts['sents'], 'words': counts['words'], 'vocab': len(tokens), 'lexdiv': (float(counts['words']) / float(len(tokens))), 'ppdoc': (float(counts['paras']) / float(n_fileids)), 'sppar': (float(counts['sents']) / float(counts['paras'])), 'secs': (time.time() - started)}
def describe(self, fileids=None, categories=None): '\n Performs a single pass of the corpus and\n returns a dictionary with a variety of metrics\n concerning the state of the corpus.\n ' started = time.time() counts = nltk.FreqDist() tokens = nltk.FreqDist() for para in self.paras(fileids, categories): counts['paras'] += 1 for sent in sent_tokenize(para): counts['sents'] += 1 for word in wordpunct_tokenize(sent): counts['words'] += 1 tokens[word] += 1 n_fileids = len((self.resolve(fileids, categories) or self.fileids())) n_topics = len(self.categories(self.resolve(fileids, categories))) return {'files': n_fileids, 'topics': n_topics, 'paras': counts['paras'], 'sents': counts['sents'], 'words': counts['words'], 'vocab': len(tokens), 'lexdiv': (float(counts['words']) / float(len(tokens))), 'ppdoc': (float(counts['paras']) / float(n_fileids)), 'sppar': (float(counts['sents']) / float(counts['paras'])), 'secs': (time.time() - started)}<|docstring|>Performs a single pass of the corpus and returns a dictionary with a variety of metrics concerning the state of the corpus.<|endoftext|>
ee09cade30cbe782de64bc745a4ac066f875ee1c999ccac7477774be012f2896
def __init__(self, root, fileids=PKL_PATTERN, **kwargs):
    """
    Initialize the corpus reader. Categorization arguments
    (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to
    the ``CategorizedCorpusReader`` constructor. The remaining arguments
    are passed to the ``CorpusReader`` constructor.
    """
    # Fall back to the default category pattern when the caller did
    # not supply any categorization argument.
    if (not any((key.startswith('cat_') for key in kwargs.keys()))):
        kwargs['cat_pattern'] = CAT_PATTERN
    CategorizedCorpusReader.__init__(self, kwargs)
    CorpusReader.__init__(self, root, fileids)
Initialize the corpus reader. Categorization arguments (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to the ``CategorizedCorpusReader`` constructor. The remaining arguments are passed to the ``CorpusReader`` constructor.
snippets/ch03/reader.py
__init__
bkudaimi/atap
360
python
def __init__(self, root, fileids=PKL_PATTERN, **kwargs): '\n Initialize the corpus reader. Categorization arguments\n (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to\n the ``CategorizedCorpusReader`` constructor. The remaining arguments\n are passed to the ``CorpusReader`` constructor.\n ' if (not any((key.startswith('cat_') for key in kwargs.keys()))): kwargs['cat_pattern'] = CAT_PATTERN CategorizedCorpusReader.__init__(self, kwargs) CorpusReader.__init__(self, root, fileids)
def __init__(self, root, fileids=PKL_PATTERN, **kwargs): '\n Initialize the corpus reader. Categorization arguments\n (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to\n the ``CategorizedCorpusReader`` constructor. The remaining arguments\n are passed to the ``CorpusReader`` constructor.\n ' if (not any((key.startswith('cat_') for key in kwargs.keys()))): kwargs['cat_pattern'] = CAT_PATTERN CategorizedCorpusReader.__init__(self, kwargs) CorpusReader.__init__(self, root, fileids)<|docstring|>Initialize the corpus reader. Categorization arguments (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to the ``CategorizedCorpusReader`` constructor. The remaining arguments are passed to the ``CorpusReader`` constructor.<|endoftext|>
6f1f3067f1f2704dc3dd5f2c22461aef8444e3460774a2d353091d183beeffbc
def resolve(self, fileids, categories):
    """
    Returns a list of fileids or categories depending on what is passed
    to each internal corpus reader function. This primarily bubbles up to
    the high level ``docs`` method, but is implemented here similar to
    the nltk ``CategorizedPlaintextCorpusReader``.
    """
    both_given = fileids is not None and categories is not None
    if both_given:
        raise ValueError('Specify fileids or categories, not both')
    return self.fileids(categories) if categories is not None else fileids
Returns a list of fileids or categories depending on what is passed to each internal corpus reader function. This primarily bubbles up to the high level ``docs`` method, but is implemented here similar to the nltk ``CategorizedPlaintextCorpusReader``.
snippets/ch03/reader.py
resolve
bkudaimi/atap
360
python
def resolve(self, fileids, categories): '\n Returns a list of fileids or categories depending on what is passed\n to each internal corpus reader function. This primarily bubbles up to\n the high level ``docs`` method, but is implemented here similar to\n the nltk ``CategorizedPlaintextCorpusReader``.\n ' if ((fileids is not None) and (categories is not None)): raise ValueError('Specify fileids or categories, not both') if (categories is not None): return self.fileids(categories) return fileids
def resolve(self, fileids, categories): '\n Returns a list of fileids or categories depending on what is passed\n to each internal corpus reader function. This primarily bubbles up to\n the high level ``docs`` method, but is implemented here similar to\n the nltk ``CategorizedPlaintextCorpusReader``.\n ' if ((fileids is not None) and (categories is not None)): raise ValueError('Specify fileids or categories, not both') if (categories is not None): return self.fileids(categories) return fileids<|docstring|>Returns a list of fileids or categories depending on what is passed to each internal corpus reader function. This primarily bubbles up to the high level ``docs`` method, but is implemented here similar to the nltk ``CategorizedPlaintextCorpusReader``.<|endoftext|>
e0d66845b6f801a0a5f8d0324e30eb9005d27b236477cf303a9c627e23f8ead4
def docs(self, fileids=None, categories=None):
    """
    Returns the document loaded from a pickled object for every file in
    the corpus. Similar to the BaleenCorpusReader, this uses a generator
    to acheive memory safe iteration.
    """
    fileids = self.resolve(fileids, categories)
    for (path, enc, fileid) in self.abspaths(fileids, True, True):
        with open(path, 'rb') as f:
            # SECURITY: pickle.load executes arbitrary code from the
            # file — only use this reader on trusted, self-produced
            # corpora.
            (yield pickle.load(f))
Returns the document loaded from a pickled object for every file in the corpus. Similar to the BaleenCorpusReader, this uses a generator to acheive memory safe iteration.
snippets/ch03/reader.py
docs
bkudaimi/atap
360
python
def docs(self, fileids=None, categories=None): '\n Returns the document loaded from a pickled object for every file in\n the corpus. Similar to the BaleenCorpusReader, this uses a generator\n to acheive memory safe iteration.\n ' fileids = self.resolve(fileids, categories) for (path, enc, fileid) in self.abspaths(fileids, True, True): with open(path, 'rb') as f: (yield pickle.load(f))
def docs(self, fileids=None, categories=None): '\n Returns the document loaded from a pickled object for every file in\n the corpus. Similar to the BaleenCorpusReader, this uses a generator\n to acheive memory safe iteration.\n ' fileids = self.resolve(fileids, categories) for (path, enc, fileid) in self.abspaths(fileids, True, True): with open(path, 'rb') as f: (yield pickle.load(f))<|docstring|>Returns the document loaded from a pickled object for every file in the corpus. Similar to the BaleenCorpusReader, this uses a generator to acheive memory safe iteration.<|endoftext|>
14b5786c33f4c223f24fdd7b91f3e36aeab127f26bc5e0677275da90b1f1ccfe
def paras(self, fileids=None, categories=None):
    """
    Returns a generator of paragraphs where each paragraph is a list of
    sentences, which is in turn a list of (token, tag) tuples.
    """
    for document in self.docs(fileids, categories):
        yield from document
Returns a generator of paragraphs where each paragraph is a list of sentences, which is in turn a list of (token, tag) tuples.
snippets/ch03/reader.py
paras
bkudaimi/atap
360
python
def paras(self, fileids=None, categories=None): '\n Returns a generator of paragraphs where each paragraph is a list of\n sentences, which is in turn a list of (token, tag) tuples.\n ' for doc in self.docs(fileids, categories): for paragraph in doc: (yield paragraph)
def paras(self, fileids=None, categories=None): '\n Returns a generator of paragraphs where each paragraph is a list of\n sentences, which is in turn a list of (token, tag) tuples.\n ' for doc in self.docs(fileids, categories): for paragraph in doc: (yield paragraph)<|docstring|>Returns a generator of paragraphs where each paragraph is a list of sentences, which is in turn a list of (token, tag) tuples.<|endoftext|>
b91331960734290aa60f090c05323e9d614529387c0579b8e8d8b617153aca74
def sents(self, fileids=None, categories=None):
    """
    Returns a generator of sentences where each sentence is a list of
    (token, tag) tuples.
    """
    for para in self.paras(fileids, categories):
        yield from para
Returns a generator of sentences where each sentence is a list of (token, tag) tuples.
snippets/ch03/reader.py
sents
bkudaimi/atap
360
python
def sents(self, fileids=None, categories=None): '\n Returns a generator of sentences where each sentence is a list of\n (token, tag) tuples.\n ' for paragraph in self.paras(fileids, categories): for sentence in paragraph: (yield sentence)
def sents(self, fileids=None, categories=None): '\n Returns a generator of sentences where each sentence is a list of\n (token, tag) tuples.\n ' for paragraph in self.paras(fileids, categories): for sentence in paragraph: (yield sentence)<|docstring|>Returns a generator of sentences where each sentence is a list of (token, tag) tuples.<|endoftext|>
692224c3fe7b4193d382124410591db50c90593c9a5addd8e0a0bb72a34285ce
def words(self, fileids=None, categories=None):
    """
    Returns a generator of bare tokens: the word part of each
    (token, tag) tuple produced by ``tagged``.

    (The previous docstring claimed (token, tag) tuples were yielded;
    only ``token[0]`` — the token text — is.)
    """
    for token in self.tagged(fileids, categories):
        yield token[0]
Returns a generator of (token, tag) tuples.
snippets/ch03/reader.py
words
bkudaimi/atap
360
python
def words(self, fileids=None, categories=None): '\n \n ' for token in self.tagged(fileids, categories): (yield token[0])
def words(self, fileids=None, categories=None): '\n \n ' for token in self.tagged(fileids, categories): (yield token[0])<|docstring|>Returns a generator of (token, tag) tuples.<|endoftext|>
873f20c0e56d815f45489f074e92a38f1c2c87a8006409b0bc5b109afcfc06ae
def is_logging_initialized():
    """
    We use the same strategy as the logging module when checking if
    the logging was initialized - look for handlers in the root logger
    """
    return bool(logging.root.handlers)
We use the same strategy as the logging module when checking if the logging was initialized - look for handlers in the root logger
oletools/common/log_helper/_root_logger_wrapper.py
is_logging_initialized
TheVivisector/oletools
2,059
python
def is_logging_initialized(): '\n We use the same strategy as the logging module when checking if\n the logging was initialized - look for handlers in the root logger\n ' return (len(logging.root.handlers) > 0)
def is_logging_initialized(): '\n We use the same strategy as the logging module when checking if\n the logging was initialized - look for handlers in the root logger\n ' return (len(logging.root.handlers) > 0)<|docstring|>We use the same strategy as the logging module when checking if the logging was initialized - look for handlers in the root logger<|endoftext|>
4d2c40c3251fbf2c680eb0afe164440499e8f168909da81c1dfac1ebf0453107
def set_formatter(fmt):
    """
    Set the formatter to be used by every handler of the root logger.
    """
    handlers = logging.root.handlers
    # No-op before logging is initialized (i.e. no root handlers yet),
    # matching the is_logging_initialized() check this inlines.
    if not handlers:
        return
    for handler in handlers:
        handler.setFormatter(fmt)
Set the formatter to be used by every handler of the root logger.
oletools/common/log_helper/_root_logger_wrapper.py
set_formatter
TheVivisector/oletools
2,059
python
def set_formatter(fmt): '\n \n ' if (not is_logging_initialized()): return for handler in logging.root.handlers: handler.setFormatter(fmt)
def set_formatter(fmt): '\n \n ' if (not is_logging_initialized()): return for handler in logging.root.handlers: handler.setFormatter(fmt)<|docstring|>Set the formatter to be used by every handler of the root logger.<|endoftext|>
9f41069d514ab893d7f9070342ce7c7ff26ebe5b0430ae9c6a81e8911e5e2b9c
def iseq_cds_coverage(self, organism: str) -> float:
    """
    Fraction of CDS matches from organism CDSs.

    Returns
    -------
    float
        Fraction of matches.
    """
    # No chunked GFF output yet means no matches at all.
    chunks_dir = Path(((self._root / organism) / 'chunks'))
    if (not chunks_dir.exists()):
        return 0.0
    assert chunks_dir.is_dir()
    cds_amino_file = ((self._root / organism) / 'cds_amino.fasta')
    assert cds_amino_file.exists()
    # CDS ids: the part of each FASTA record id before the first '|'.
    cds_ids = []
    with read_fasta(cds_amino_file) as file:
        for item in file:
            cds_ids.append(item.id.partition('|')[0])
    # Ids ISEQ matched, stripping the '|...' suffix from each seqid.
    # NOTE(review): this relies on regex semantics of str.replace for
    # '\|.*' — confirm the installed pandas treats the pattern as a
    # regex (pass regex=True explicitly on modern pandas).
    cds_id_matches = []
    for f in chunks_dir.glob('*.gff'):
        gff = iseq.gff.read(f)
        ids = gff.dataframe['seqid'].str.replace('\\|.*', '')
        cds_id_matches += ids.tolist()
    # Coverage = share of CDS ids that appear among the matches.
    cds_set = set(cds_ids)
    match_set = set(cds_id_matches)
    nremain = len((cds_set - match_set))
    return (1 - (nremain / len(cds_set)))
Fraction of CDS matches from organism CDSs. Returns ------- float Fraction of matches.
iseq_prof/_profiling.py
iseq_cds_coverage
EBI-Metagenomics/iseq-prof
0
python
def iseq_cds_coverage(self, organism: str) -> float: '\n Fraction of CDS matches from organism CDSs.\n\n Returns\n -------\n float\n Fraction of matches.\n ' chunks_dir = Path(((self._root / organism) / 'chunks')) if (not chunks_dir.exists()): return 0.0 assert chunks_dir.is_dir() cds_amino_file = ((self._root / organism) / 'cds_amino.fasta') assert cds_amino_file.exists() cds_ids = [] with read_fasta(cds_amino_file) as file: for item in file: cds_ids.append(item.id.partition('|')[0]) cds_id_matches = [] for f in chunks_dir.glob('*.gff'): gff = iseq.gff.read(f) ids = gff.dataframe['seqid'].str.replace('\\|.*', ) cds_id_matches += ids.tolist() cds_set = set(cds_ids) match_set = set(cds_id_matches) nremain = len((cds_set - match_set)) return (1 - (nremain / len(cds_set)))
def iseq_cds_coverage(self, organism: str) -> float: '\n Fraction of CDS matches from organism CDSs.\n\n Returns\n -------\n float\n Fraction of matches.\n ' chunks_dir = Path(((self._root / organism) / 'chunks')) if (not chunks_dir.exists()): return 0.0 assert chunks_dir.is_dir() cds_amino_file = ((self._root / organism) / 'cds_amino.fasta') assert cds_amino_file.exists() cds_ids = [] with read_fasta(cds_amino_file) as file: for item in file: cds_ids.append(item.id.partition('|')[0]) cds_id_matches = [] for f in chunks_dir.glob('*.gff'): gff = iseq.gff.read(f) ids = gff.dataframe['seqid'].str.replace('\\|.*', ) cds_id_matches += ids.tolist() cds_set = set(cds_ids) match_set = set(cds_id_matches) nremain = len((cds_set - match_set)) return (1 - (nremain / len(cds_set)))<|docstring|>Fraction of CDS matches from organism CDSs. Returns ------- float Fraction of matches.<|endoftext|>
b355eb521357d7458e180c646a6cb3ac2133acb985ba8453e0731a7d73d6d3ab
def merge_chunks(self, organism: str, force=False): '\n Merge ISEQ chunked files.\n\n Parameters\n ----------\n organism\n Organism accession.\n force\n Overwrite existing files if necessary. Defaults to ``False``.\n ' names = ['output.gff', 'oamino.fasta', 'ocodon.fasta'] root = (self._root / organism) if ((not force) and all(((root / n).exists() for n in names))): files = [n for n in names if (root / n).exists()] files_list = ', '.join(files) raise ValueError(f'File(s) {files_list} already exist.') folder = (root / 'chunks') globs = ['output.*.gff', 'oamino.*.fasta', 'ocodon.*.fasta'] chunks: List[Set[int]] = [set(), set(), set()] for (i, glob) in enumerate(globs): for f in folder.glob(glob): chunks[i].add(int(f.name.split('.')[1])) chunks_set = ((chunks[0] & chunks[1]) & chunks[2]) nums = list(chunks_set) merge_files('output', 'gff', root, nums, True) merge_files('oamino', 'fasta', root, nums, False) merge_files('ocodon', 'fasta', root, nums, False)
Merge ISEQ chunked files. Parameters ---------- organism Organism accession. force Overwrite existing files if necessary. Defaults to ``False``.
iseq_prof/_profiling.py
merge_chunks
EBI-Metagenomics/iseq-prof
0
python
def merge_chunks(self, organism: str, force=False): '\n Merge ISEQ chunked files.\n\n Parameters\n ----------\n organism\n Organism accession.\n force\n Overwrite existing files if necessary. Defaults to ``False``.\n ' names = ['output.gff', 'oamino.fasta', 'ocodon.fasta'] root = (self._root / organism) if ((not force) and all(((root / n).exists() for n in names))): files = [n for n in names if (root / n).exists()] files_list = ', '.join(files) raise ValueError(f'File(s) {files_list} already exist.') folder = (root / 'chunks') globs = ['output.*.gff', 'oamino.*.fasta', 'ocodon.*.fasta'] chunks: List[Set[int]] = [set(), set(), set()] for (i, glob) in enumerate(globs): for f in folder.glob(glob): chunks[i].add(int(f.name.split('.')[1])) chunks_set = ((chunks[0] & chunks[1]) & chunks[2]) nums = list(chunks_set) merge_files('output', 'gff', root, nums, True) merge_files('oamino', 'fasta', root, nums, False) merge_files('ocodon', 'fasta', root, nums, False)
def merge_chunks(self, organism: str, force=False): '\n Merge ISEQ chunked files.\n\n Parameters\n ----------\n organism\n Organism accession.\n force\n Overwrite existing files if necessary. Defaults to ``False``.\n ' names = ['output.gff', 'oamino.fasta', 'ocodon.fasta'] root = (self._root / organism) if ((not force) and all(((root / n).exists() for n in names))): files = [n for n in names if (root / n).exists()] files_list = ', '.join(files) raise ValueError(f'File(s) {files_list} already exist.') folder = (root / 'chunks') globs = ['output.*.gff', 'oamino.*.fasta', 'ocodon.*.fasta'] chunks: List[Set[int]] = [set(), set(), set()] for (i, glob) in enumerate(globs): for f in folder.glob(glob): chunks[i].add(int(f.name.split('.')[1])) chunks_set = ((chunks[0] & chunks[1]) & chunks[2]) nums = list(chunks_set) merge_files('output', 'gff', root, nums, True) merge_files('oamino', 'fasta', root, nums, False) merge_files('ocodon', 'fasta', root, nums, False)<|docstring|>Merge ISEQ chunked files. Parameters ---------- organism Organism accession. force Overwrite existing files if necessary. Defaults to ``False``.<|endoftext|>
d4d5d088a0691ff44c3080223d53b65399fea6718de0b7c81cc729d10da703ee
@not_implemented_for('directed') @not_implemented_for('multigraph') def cycle_basis(G, root=None): ' Returns a list of cycles which form a basis for cycles of G.\n\n A basis for cycles of a network is a minimal collection of\n cycles such that any cycle in the network can be written\n as a sum of cycles in the basis. Here summation of cycles\n is defined as "exclusive or" of the edges. Cycle bases are\n useful, e.g. when deriving equations for electric circuits\n using Kirchhoff\'s Laws.\n\n Parameters\n ----------\n G : NetworkX Graph\n root : node, optional\n Specify starting node for basis.\n\n Returns\n -------\n A list of cycle lists. Each cycle list is a list of nodes\n which forms a cycle (loop) in G.\n\n Examples\n --------\n >>> G = nx.Graph()\n >>> nx.add_cycle(G, [0, 1, 2, 3])\n >>> nx.add_cycle(G, [0, 3, 4, 5])\n >>> print(nx.cycle_basis(G, 0))\n [[3, 4, 5, 0], [1, 2, 3, 0]]\n\n Notes\n -----\n This is adapted from algorithm CACM 491 [1]_.\n\n References\n ----------\n .. [1] Paton, K. An algorithm for finding a fundamental set of\n cycles of a graph. Comm. ACM 12, 9 (Sept 1969), 514-518.\n\n See Also\n --------\n simple_cycles\n ' gnodes = set(G.nodes()) cycles = [] while gnodes: if (root is None): root = gnodes.pop() stack = [root] pred = {root: root} used = {root: set()} while stack: z = stack.pop() zused = used[z] for nbr in G[z]: if (nbr not in used): pred[nbr] = z stack.append(nbr) used[nbr] = set([z]) elif (nbr == z): cycles.append([z]) elif (nbr not in zused): pn = used[nbr] cycle = [nbr, z] p = pred[z] while (p not in pn): cycle.append(p) p = pred[p] cycle.append(p) cycles.append(cycle) used[nbr].add(z) gnodes -= set(pred) root = None return cycles
Returns a list of cycles which form a basis for cycles of G. A basis for cycles of a network is a minimal collection of cycles such that any cycle in the network can be written as a sum of cycles in the basis. Here summation of cycles is defined as "exclusive or" of the edges. Cycle bases are useful, e.g. when deriving equations for electric circuits using Kirchhoff's Laws. Parameters ---------- G : NetworkX Graph root : node, optional Specify starting node for basis. Returns ------- A list of cycle lists. Each cycle list is a list of nodes which forms a cycle (loop) in G. Examples -------- >>> G = nx.Graph() >>> nx.add_cycle(G, [0, 1, 2, 3]) >>> nx.add_cycle(G, [0, 3, 4, 5]) >>> print(nx.cycle_basis(G, 0)) [[3, 4, 5, 0], [1, 2, 3, 0]] Notes ----- This is adapted from algorithm CACM 491 [1]_. References ---------- .. [1] Paton, K. An algorithm for finding a fundamental set of cycles of a graph. Comm. ACM 12, 9 (Sept 1969), 514-518. See Also -------- simple_cycles
src/networkx/algorithms/cycles.py
cycle_basis
MarletteFunding/aws-kube-codesuite
184
python
@not_implemented_for('directed') @not_implemented_for('multigraph') def cycle_basis(G, root=None): ' Returns a list of cycles which form a basis for cycles of G.\n\n A basis for cycles of a network is a minimal collection of\n cycles such that any cycle in the network can be written\n as a sum of cycles in the basis. Here summation of cycles\n is defined as "exclusive or" of the edges. Cycle bases are\n useful, e.g. when deriving equations for electric circuits\n using Kirchhoff\'s Laws.\n\n Parameters\n ----------\n G : NetworkX Graph\n root : node, optional\n Specify starting node for basis.\n\n Returns\n -------\n A list of cycle lists. Each cycle list is a list of nodes\n which forms a cycle (loop) in G.\n\n Examples\n --------\n >>> G = nx.Graph()\n >>> nx.add_cycle(G, [0, 1, 2, 3])\n >>> nx.add_cycle(G, [0, 3, 4, 5])\n >>> print(nx.cycle_basis(G, 0))\n [[3, 4, 5, 0], [1, 2, 3, 0]]\n\n Notes\n -----\n This is adapted from algorithm CACM 491 [1]_.\n\n References\n ----------\n .. [1] Paton, K. An algorithm for finding a fundamental set of\n cycles of a graph. Comm. ACM 12, 9 (Sept 1969), 514-518.\n\n See Also\n --------\n simple_cycles\n ' gnodes = set(G.nodes()) cycles = [] while gnodes: if (root is None): root = gnodes.pop() stack = [root] pred = {root: root} used = {root: set()} while stack: z = stack.pop() zused = used[z] for nbr in G[z]: if (nbr not in used): pred[nbr] = z stack.append(nbr) used[nbr] = set([z]) elif (nbr == z): cycles.append([z]) elif (nbr not in zused): pn = used[nbr] cycle = [nbr, z] p = pred[z] while (p not in pn): cycle.append(p) p = pred[p] cycle.append(p) cycles.append(cycle) used[nbr].add(z) gnodes -= set(pred) root = None return cycles
@not_implemented_for('directed') @not_implemented_for('multigraph') def cycle_basis(G, root=None): ' Returns a list of cycles which form a basis for cycles of G.\n\n A basis for cycles of a network is a minimal collection of\n cycles such that any cycle in the network can be written\n as a sum of cycles in the basis. Here summation of cycles\n is defined as "exclusive or" of the edges. Cycle bases are\n useful, e.g. when deriving equations for electric circuits\n using Kirchhoff\'s Laws.\n\n Parameters\n ----------\n G : NetworkX Graph\n root : node, optional\n Specify starting node for basis.\n\n Returns\n -------\n A list of cycle lists. Each cycle list is a list of nodes\n which forms a cycle (loop) in G.\n\n Examples\n --------\n >>> G = nx.Graph()\n >>> nx.add_cycle(G, [0, 1, 2, 3])\n >>> nx.add_cycle(G, [0, 3, 4, 5])\n >>> print(nx.cycle_basis(G, 0))\n [[3, 4, 5, 0], [1, 2, 3, 0]]\n\n Notes\n -----\n This is adapted from algorithm CACM 491 [1]_.\n\n References\n ----------\n .. [1] Paton, K. An algorithm for finding a fundamental set of\n cycles of a graph. Comm. ACM 12, 9 (Sept 1969), 514-518.\n\n See Also\n --------\n simple_cycles\n ' gnodes = set(G.nodes()) cycles = [] while gnodes: if (root is None): root = gnodes.pop() stack = [root] pred = {root: root} used = {root: set()} while stack: z = stack.pop() zused = used[z] for nbr in G[z]: if (nbr not in used): pred[nbr] = z stack.append(nbr) used[nbr] = set([z]) elif (nbr == z): cycles.append([z]) elif (nbr not in zused): pn = used[nbr] cycle = [nbr, z] p = pred[z] while (p not in pn): cycle.append(p) p = pred[p] cycle.append(p) cycles.append(cycle) used[nbr].add(z) gnodes -= set(pred) root = None return cycles<|docstring|>Returns a list of cycles which form a basis for cycles of G. A basis for cycles of a network is a minimal collection of cycles such that any cycle in the network can be written as a sum of cycles in the basis. Here summation of cycles is defined as "exclusive or" of the edges. 
Cycle bases are useful, e.g. when deriving equations for electric circuits using Kirchhoff's Laws. Parameters ---------- G : NetworkX Graph root : node, optional Specify starting node for basis. Returns ------- A list of cycle lists. Each cycle list is a list of nodes which forms a cycle (loop) in G. Examples -------- >>> G = nx.Graph() >>> nx.add_cycle(G, [0, 1, 2, 3]) >>> nx.add_cycle(G, [0, 3, 4, 5]) >>> print(nx.cycle_basis(G, 0)) [[3, 4, 5, 0], [1, 2, 3, 0]] Notes ----- This is adapted from algorithm CACM 491 [1]_. References ---------- .. [1] Paton, K. An algorithm for finding a fundamental set of cycles of a graph. Comm. ACM 12, 9 (Sept 1969), 514-518. See Also -------- simple_cycles<|endoftext|>
a4bdeaa70e0c0ed9265718264c2e6d507cbf8eaeb1b5d6edfc46e742d907178c
@not_implemented_for('undirected') def simple_cycles(G): "Find simple cycles (elementary circuits) of a directed graph.\n\n A `simple cycle`, or `elementary circuit`, is a closed path where\n no node appears twice. Two elementary circuits are distinct if they\n are not cyclic permutations of each other.\n\n This is a nonrecursive, iterator/generator version of Johnson's\n algorithm [1]_. There may be better algorithms for some cases [2]_ [3]_.\n\n Parameters\n ----------\n G : NetworkX DiGraph\n A directed graph\n\n Returns\n -------\n cycle_generator: generator\n A generator that produces elementary cycles of the graph.\n Each cycle is represented by a list of nodes along the cycle.\n\n Examples\n --------\n >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)]\n >>> G = nx.DiGraph(edges)\n >>> len(list(nx.simple_cycles(G)))\n 5\n\n To filter the cycles so that they don't include certain nodes or edges,\n copy your graph and eliminate those nodes or edges before calling\n\n >>> copyG = G.copy()\n >>> copyG.remove_nodes_from([1])\n >>> copyG.remove_edges_from([(0, 1)])\n >>> len(list(nx.simple_cycles(copyG)))\n 3\n\n\n Notes\n -----\n The implementation follows pp. 79-80 in [1]_.\n\n The time complexity is $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$\n elementary circuits.\n\n References\n ----------\n .. [1] Finding all the elementary circuits of a directed graph.\n D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.\n http://dx.doi.org/10.1137/0204007\n .. [2] Enumerating the cycles of a digraph: a new preprocessing strategy.\n G. Loizou and P. Thanish, Information Sciences, v. 27, 163-182, 1982.\n .. [3] A search strategy for the elementary cycles of a directed graph.\n J.L. Szwarcfiter and P.E. Lauer, BIT NUMERICAL MATHEMATICS,\n v. 16, no. 
2, 192-204, 1976.\n\n See Also\n --------\n cycle_basis\n " def _unblock(thisnode, blocked, B): stack = set([thisnode]) while stack: node = stack.pop() if (node in blocked): blocked.remove(node) stack.update(B[node]) B[node].clear() subG = type(G)(G.edges()) sccs = list(nx.strongly_connected_components(subG)) while sccs: scc = sccs.pop() startnode = scc.pop() path = [startnode] blocked = set() closed = set() blocked.add(startnode) B = defaultdict(set) stack = [(startnode, list(subG[startnode]))] while stack: (thisnode, nbrs) = stack[(- 1)] if nbrs: nextnode = nbrs.pop() if (nextnode == startnode): (yield path[:]) closed.update(path) elif (nextnode not in blocked): path.append(nextnode) stack.append((nextnode, list(subG[nextnode]))) closed.discard(nextnode) blocked.add(nextnode) continue if (not nbrs): if (thisnode in closed): _unblock(thisnode, blocked, B) else: for nbr in subG[thisnode]: if (thisnode not in B[nbr]): B[nbr].add(thisnode) stack.pop() path.pop() subG.remove_node(startnode) H = subG.subgraph(scc) sccs.extend(list(nx.strongly_connected_components(H)))
Find simple cycles (elementary circuits) of a directed graph. A `simple cycle`, or `elementary circuit`, is a closed path where no node appears twice. Two elementary circuits are distinct if they are not cyclic permutations of each other. This is a nonrecursive, iterator/generator version of Johnson's algorithm [1]_. There may be better algorithms for some cases [2]_ [3]_. Parameters ---------- G : NetworkX DiGraph A directed graph Returns ------- cycle_generator: generator A generator that produces elementary cycles of the graph. Each cycle is represented by a list of nodes along the cycle. Examples -------- >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)] >>> G = nx.DiGraph(edges) >>> len(list(nx.simple_cycles(G))) 5 To filter the cycles so that they don't include certain nodes or edges, copy your graph and eliminate those nodes or edges before calling >>> copyG = G.copy() >>> copyG.remove_nodes_from([1]) >>> copyG.remove_edges_from([(0, 1)]) >>> len(list(nx.simple_cycles(copyG))) 3 Notes ----- The implementation follows pp. 79-80 in [1]_. The time complexity is $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$ elementary circuits. References ---------- .. [1] Finding all the elementary circuits of a directed graph. D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975. http://dx.doi.org/10.1137/0204007 .. [2] Enumerating the cycles of a digraph: a new preprocessing strategy. G. Loizou and P. Thanish, Information Sciences, v. 27, 163-182, 1982. .. [3] A search strategy for the elementary cycles of a directed graph. J.L. Szwarcfiter and P.E. Lauer, BIT NUMERICAL MATHEMATICS, v. 16, no. 2, 192-204, 1976. See Also -------- cycle_basis
src/networkx/algorithms/cycles.py
simple_cycles
MarletteFunding/aws-kube-codesuite
184
python
@not_implemented_for('undirected') def simple_cycles(G): "Find simple cycles (elementary circuits) of a directed graph.\n\n A `simple cycle`, or `elementary circuit`, is a closed path where\n no node appears twice. Two elementary circuits are distinct if they\n are not cyclic permutations of each other.\n\n This is a nonrecursive, iterator/generator version of Johnson's\n algorithm [1]_. There may be better algorithms for some cases [2]_ [3]_.\n\n Parameters\n ----------\n G : NetworkX DiGraph\n A directed graph\n\n Returns\n -------\n cycle_generator: generator\n A generator that produces elementary cycles of the graph.\n Each cycle is represented by a list of nodes along the cycle.\n\n Examples\n --------\n >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)]\n >>> G = nx.DiGraph(edges)\n >>> len(list(nx.simple_cycles(G)))\n 5\n\n To filter the cycles so that they don't include certain nodes or edges,\n copy your graph and eliminate those nodes or edges before calling\n\n >>> copyG = G.copy()\n >>> copyG.remove_nodes_from([1])\n >>> copyG.remove_edges_from([(0, 1)])\n >>> len(list(nx.simple_cycles(copyG)))\n 3\n\n\n Notes\n -----\n The implementation follows pp. 79-80 in [1]_.\n\n The time complexity is $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$\n elementary circuits.\n\n References\n ----------\n .. [1] Finding all the elementary circuits of a directed graph.\n D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.\n http://dx.doi.org/10.1137/0204007\n .. [2] Enumerating the cycles of a digraph: a new preprocessing strategy.\n G. Loizou and P. Thanish, Information Sciences, v. 27, 163-182, 1982.\n .. [3] A search strategy for the elementary cycles of a directed graph.\n J.L. Szwarcfiter and P.E. Lauer, BIT NUMERICAL MATHEMATICS,\n v. 16, no. 
2, 192-204, 1976.\n\n See Also\n --------\n cycle_basis\n " def _unblock(thisnode, blocked, B): stack = set([thisnode]) while stack: node = stack.pop() if (node in blocked): blocked.remove(node) stack.update(B[node]) B[node].clear() subG = type(G)(G.edges()) sccs = list(nx.strongly_connected_components(subG)) while sccs: scc = sccs.pop() startnode = scc.pop() path = [startnode] blocked = set() closed = set() blocked.add(startnode) B = defaultdict(set) stack = [(startnode, list(subG[startnode]))] while stack: (thisnode, nbrs) = stack[(- 1)] if nbrs: nextnode = nbrs.pop() if (nextnode == startnode): (yield path[:]) closed.update(path) elif (nextnode not in blocked): path.append(nextnode) stack.append((nextnode, list(subG[nextnode]))) closed.discard(nextnode) blocked.add(nextnode) continue if (not nbrs): if (thisnode in closed): _unblock(thisnode, blocked, B) else: for nbr in subG[thisnode]: if (thisnode not in B[nbr]): B[nbr].add(thisnode) stack.pop() path.pop() subG.remove_node(startnode) H = subG.subgraph(scc) sccs.extend(list(nx.strongly_connected_components(H)))
@not_implemented_for('undirected') def simple_cycles(G): "Find simple cycles (elementary circuits) of a directed graph.\n\n A `simple cycle`, or `elementary circuit`, is a closed path where\n no node appears twice. Two elementary circuits are distinct if they\n are not cyclic permutations of each other.\n\n This is a nonrecursive, iterator/generator version of Johnson's\n algorithm [1]_. There may be better algorithms for some cases [2]_ [3]_.\n\n Parameters\n ----------\n G : NetworkX DiGraph\n A directed graph\n\n Returns\n -------\n cycle_generator: generator\n A generator that produces elementary cycles of the graph.\n Each cycle is represented by a list of nodes along the cycle.\n\n Examples\n --------\n >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)]\n >>> G = nx.DiGraph(edges)\n >>> len(list(nx.simple_cycles(G)))\n 5\n\n To filter the cycles so that they don't include certain nodes or edges,\n copy your graph and eliminate those nodes or edges before calling\n\n >>> copyG = G.copy()\n >>> copyG.remove_nodes_from([1])\n >>> copyG.remove_edges_from([(0, 1)])\n >>> len(list(nx.simple_cycles(copyG)))\n 3\n\n\n Notes\n -----\n The implementation follows pp. 79-80 in [1]_.\n\n The time complexity is $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$\n elementary circuits.\n\n References\n ----------\n .. [1] Finding all the elementary circuits of a directed graph.\n D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.\n http://dx.doi.org/10.1137/0204007\n .. [2] Enumerating the cycles of a digraph: a new preprocessing strategy.\n G. Loizou and P. Thanish, Information Sciences, v. 27, 163-182, 1982.\n .. [3] A search strategy for the elementary cycles of a directed graph.\n J.L. Szwarcfiter and P.E. Lauer, BIT NUMERICAL MATHEMATICS,\n v. 16, no. 
2, 192-204, 1976.\n\n See Also\n --------\n cycle_basis\n " def _unblock(thisnode, blocked, B): stack = set([thisnode]) while stack: node = stack.pop() if (node in blocked): blocked.remove(node) stack.update(B[node]) B[node].clear() subG = type(G)(G.edges()) sccs = list(nx.strongly_connected_components(subG)) while sccs: scc = sccs.pop() startnode = scc.pop() path = [startnode] blocked = set() closed = set() blocked.add(startnode) B = defaultdict(set) stack = [(startnode, list(subG[startnode]))] while stack: (thisnode, nbrs) = stack[(- 1)] if nbrs: nextnode = nbrs.pop() if (nextnode == startnode): (yield path[:]) closed.update(path) elif (nextnode not in blocked): path.append(nextnode) stack.append((nextnode, list(subG[nextnode]))) closed.discard(nextnode) blocked.add(nextnode) continue if (not nbrs): if (thisnode in closed): _unblock(thisnode, blocked, B) else: for nbr in subG[thisnode]: if (thisnode not in B[nbr]): B[nbr].add(thisnode) stack.pop() path.pop() subG.remove_node(startnode) H = subG.subgraph(scc) sccs.extend(list(nx.strongly_connected_components(H)))<|docstring|>Find simple cycles (elementary circuits) of a directed graph. A `simple cycle`, or `elementary circuit`, is a closed path where no node appears twice. Two elementary circuits are distinct if they are not cyclic permutations of each other. This is a nonrecursive, iterator/generator version of Johnson's algorithm [1]_. There may be better algorithms for some cases [2]_ [3]_. Parameters ---------- G : NetworkX DiGraph A directed graph Returns ------- cycle_generator: generator A generator that produces elementary cycles of the graph. Each cycle is represented by a list of nodes along the cycle. 
Examples -------- >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)] >>> G = nx.DiGraph(edges) >>> len(list(nx.simple_cycles(G))) 5 To filter the cycles so that they don't include certain nodes or edges, copy your graph and eliminate those nodes or edges before calling >>> copyG = G.copy() >>> copyG.remove_nodes_from([1]) >>> copyG.remove_edges_from([(0, 1)]) >>> len(list(nx.simple_cycles(copyG))) 3 Notes ----- The implementation follows pp. 79-80 in [1]_. The time complexity is $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$ elementary circuits. References ---------- .. [1] Finding all the elementary circuits of a directed graph. D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975. http://dx.doi.org/10.1137/0204007 .. [2] Enumerating the cycles of a digraph: a new preprocessing strategy. G. Loizou and P. Thanish, Information Sciences, v. 27, 163-182, 1982. .. [3] A search strategy for the elementary cycles of a directed graph. J.L. Szwarcfiter and P.E. Lauer, BIT NUMERICAL MATHEMATICS, v. 16, no. 2, 192-204, 1976. See Also -------- cycle_basis<|endoftext|>
6ab463bfaf46e67d976cddaee9362c4fa308158f48f794a9bcd97ff80518d38a
@not_implemented_for('undirected') def recursive_simple_cycles(G): 'Find simple cycles (elementary circuits) of a directed graph.\n\n A `simple cycle`, or `elementary circuit`, is a closed path where\n no node appears twice. Two elementary circuits are distinct if they\n are not cyclic permutations of each other.\n\n This version uses a recursive algorithm to build a list of cycles.\n You should probably use the iterator version called simple_cycles().\n Warning: This recursive version uses lots of RAM!\n\n Parameters\n ----------\n G : NetworkX DiGraph\n A directed graph\n\n Returns\n -------\n A list of cycles, where each cycle is represented by a list of nodes\n along the cycle.\n\n Example:\n\n >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)]\n >>> G = nx.DiGraph(edges)\n >>> nx.recursive_simple_cycles(G)\n [[0], [0, 1, 2], [0, 2], [1, 2], [2]]\n\n See Also\n --------\n cycle_basis (for undirected graphs)\n\n Notes\n -----\n The implementation follows pp. 79-80 in [1]_.\n\n The time complexity is $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$\n elementary circuits.\n\n References\n ----------\n .. [1] Finding all the elementary circuits of a directed graph.\n D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.\n http://dx.doi.org/10.1137/0204007\n\n See Also\n --------\n simple_cycles, cycle_basis\n ' def _unblock(thisnode): 'Recursively unblock and remove nodes from B[thisnode].' 
if blocked[thisnode]: blocked[thisnode] = False while B[thisnode]: _unblock(B[thisnode].pop()) def circuit(thisnode, startnode, component): closed = False path.append(thisnode) blocked[thisnode] = True for nextnode in component[thisnode]: if (nextnode == startnode): result.append(path[:]) closed = True elif (not blocked[nextnode]): if circuit(nextnode, startnode, component): closed = True if closed: _unblock(thisnode) else: for nextnode in component[thisnode]: if (thisnode not in B[nextnode]): B[nextnode].append(thisnode) path.pop() return closed path = [] blocked = defaultdict(bool) B = defaultdict(list) result = [] ordering = dict(zip(G, range(len(G)))) for s in ordering: subgraph = G.subgraph((node for node in G if (ordering[node] >= ordering[s]))) strongcomp = nx.strongly_connected_components(subgraph) mincomp = min(strongcomp, key=(lambda ns: min((ordering[n] for n in ns)))) component = G.subgraph(mincomp) if component: startnode = min(component, key=ordering.__getitem__) for node in component: blocked[node] = False B[node][:] = [] dummy = circuit(startnode, startnode, component) return result
Find simple cycles (elementary circuits) of a directed graph. A `simple cycle`, or `elementary circuit`, is a closed path where no node appears twice. Two elementary circuits are distinct if they are not cyclic permutations of each other. This version uses a recursive algorithm to build a list of cycles. You should probably use the iterator version called simple_cycles(). Warning: This recursive version uses lots of RAM! Parameters ---------- G : NetworkX DiGraph A directed graph Returns ------- A list of cycles, where each cycle is represented by a list of nodes along the cycle. Example: >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)] >>> G = nx.DiGraph(edges) >>> nx.recursive_simple_cycles(G) [[0], [0, 1, 2], [0, 2], [1, 2], [2]] See Also -------- cycle_basis (for undirected graphs) Notes ----- The implementation follows pp. 79-80 in [1]_. The time complexity is $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$ elementary circuits. References ---------- .. [1] Finding all the elementary circuits of a directed graph. D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975. http://dx.doi.org/10.1137/0204007 See Also -------- simple_cycles, cycle_basis
src/networkx/algorithms/cycles.py
recursive_simple_cycles
MarletteFunding/aws-kube-codesuite
184
python
@not_implemented_for('undirected') def recursive_simple_cycles(G): 'Find simple cycles (elementary circuits) of a directed graph.\n\n A `simple cycle`, or `elementary circuit`, is a closed path where\n no node appears twice. Two elementary circuits are distinct if they\n are not cyclic permutations of each other.\n\n This version uses a recursive algorithm to build a list of cycles.\n You should probably use the iterator version called simple_cycles().\n Warning: This recursive version uses lots of RAM!\n\n Parameters\n ----------\n G : NetworkX DiGraph\n A directed graph\n\n Returns\n -------\n A list of cycles, where each cycle is represented by a list of nodes\n along the cycle.\n\n Example:\n\n >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)]\n >>> G = nx.DiGraph(edges)\n >>> nx.recursive_simple_cycles(G)\n [[0], [0, 1, 2], [0, 2], [1, 2], [2]]\n\n See Also\n --------\n cycle_basis (for undirected graphs)\n\n Notes\n -----\n The implementation follows pp. 79-80 in [1]_.\n\n The time complexity is $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$\n elementary circuits.\n\n References\n ----------\n .. [1] Finding all the elementary circuits of a directed graph.\n D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.\n http://dx.doi.org/10.1137/0204007\n\n See Also\n --------\n simple_cycles, cycle_basis\n ' def _unblock(thisnode): 'Recursively unblock and remove nodes from B[thisnode].' 
if blocked[thisnode]: blocked[thisnode] = False while B[thisnode]: _unblock(B[thisnode].pop()) def circuit(thisnode, startnode, component): closed = False path.append(thisnode) blocked[thisnode] = True for nextnode in component[thisnode]: if (nextnode == startnode): result.append(path[:]) closed = True elif (not blocked[nextnode]): if circuit(nextnode, startnode, component): closed = True if closed: _unblock(thisnode) else: for nextnode in component[thisnode]: if (thisnode not in B[nextnode]): B[nextnode].append(thisnode) path.pop() return closed path = [] blocked = defaultdict(bool) B = defaultdict(list) result = [] ordering = dict(zip(G, range(len(G)))) for s in ordering: subgraph = G.subgraph((node for node in G if (ordering[node] >= ordering[s]))) strongcomp = nx.strongly_connected_components(subgraph) mincomp = min(strongcomp, key=(lambda ns: min((ordering[n] for n in ns)))) component = G.subgraph(mincomp) if component: startnode = min(component, key=ordering.__getitem__) for node in component: blocked[node] = False B[node][:] = [] dummy = circuit(startnode, startnode, component) return result
@not_implemented_for('undirected') def recursive_simple_cycles(G): 'Find simple cycles (elementary circuits) of a directed graph.\n\n A `simple cycle`, or `elementary circuit`, is a closed path where\n no node appears twice. Two elementary circuits are distinct if they\n are not cyclic permutations of each other.\n\n This version uses a recursive algorithm to build a list of cycles.\n You should probably use the iterator version called simple_cycles().\n Warning: This recursive version uses lots of RAM!\n\n Parameters\n ----------\n G : NetworkX DiGraph\n A directed graph\n\n Returns\n -------\n A list of cycles, where each cycle is represented by a list of nodes\n along the cycle.\n\n Example:\n\n >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)]\n >>> G = nx.DiGraph(edges)\n >>> nx.recursive_simple_cycles(G)\n [[0], [0, 1, 2], [0, 2], [1, 2], [2]]\n\n See Also\n --------\n cycle_basis (for undirected graphs)\n\n Notes\n -----\n The implementation follows pp. 79-80 in [1]_.\n\n The time complexity is $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$\n elementary circuits.\n\n References\n ----------\n .. [1] Finding all the elementary circuits of a directed graph.\n D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.\n http://dx.doi.org/10.1137/0204007\n\n See Also\n --------\n simple_cycles, cycle_basis\n ' def _unblock(thisnode): 'Recursively unblock and remove nodes from B[thisnode].' 
if blocked[thisnode]: blocked[thisnode] = False while B[thisnode]: _unblock(B[thisnode].pop()) def circuit(thisnode, startnode, component): closed = False path.append(thisnode) blocked[thisnode] = True for nextnode in component[thisnode]: if (nextnode == startnode): result.append(path[:]) closed = True elif (not blocked[nextnode]): if circuit(nextnode, startnode, component): closed = True if closed: _unblock(thisnode) else: for nextnode in component[thisnode]: if (thisnode not in B[nextnode]): B[nextnode].append(thisnode) path.pop() return closed path = [] blocked = defaultdict(bool) B = defaultdict(list) result = [] ordering = dict(zip(G, range(len(G)))) for s in ordering: subgraph = G.subgraph((node for node in G if (ordering[node] >= ordering[s]))) strongcomp = nx.strongly_connected_components(subgraph) mincomp = min(strongcomp, key=(lambda ns: min((ordering[n] for n in ns)))) component = G.subgraph(mincomp) if component: startnode = min(component, key=ordering.__getitem__) for node in component: blocked[node] = False B[node][:] = [] dummy = circuit(startnode, startnode, component) return result<|docstring|>Find simple cycles (elementary circuits) of a directed graph. A `simple cycle`, or `elementary circuit`, is a closed path where no node appears twice. Two elementary circuits are distinct if they are not cyclic permutations of each other. This version uses a recursive algorithm to build a list of cycles. You should probably use the iterator version called simple_cycles(). Warning: This recursive version uses lots of RAM! Parameters ---------- G : NetworkX DiGraph A directed graph Returns ------- A list of cycles, where each cycle is represented by a list of nodes along the cycle. Example: >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)] >>> G = nx.DiGraph(edges) >>> nx.recursive_simple_cycles(G) [[0], [0, 1, 2], [0, 2], [1, 2], [2]] See Also -------- cycle_basis (for undirected graphs) Notes ----- The implementation follows pp. 
79-80 in [1]_. The time complexity is $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$ elementary circuits. References ---------- .. [1] Finding all the elementary circuits of a directed graph. D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975. http://dx.doi.org/10.1137/0204007 See Also -------- simple_cycles, cycle_basis<|endoftext|>
816a5cc70c0153cb1256c2345b54c4d75fe011a72305a059de2e3992bfe3c42d
def find_cycle(G, source=None, orientation='original'): '\n Returns the edges of a cycle found via a directed, depth-first traversal.\n\n Parameters\n ----------\n G : graph\n A directed/undirected graph/multigraph.\n\n source : node, list of nodes\n The node from which the traversal begins. If None, then a source\n is chosen arbitrarily and repeatedly until all edges from each node in\n the graph are searched.\n\n orientation : \'original\' | \'reverse\' | \'ignore\'\n For directed graphs and directed multigraphs, edge traversals need not\n respect the original orientation of the edges. When set to \'reverse\',\n then every edge will be traversed in the reverse direction. When set to\n \'ignore\', then each directed edge is treated as a single undirected\n edge that can be traversed in either direction. For undirected graphs\n and undirected multigraphs, this parameter is meaningless and is not\n consulted by the algorithm.\n\n Returns\n -------\n edges : directed edges\n A list of directed edges indicating the path taken for the loop. If\n no cycle is found, then an exception is raised. For graphs, an\n edge is of the form `(u, v)` where `u` and `v` are the tail and head\n of the edge as determined by the traversal. For multigraphs, an edge is\n of the form `(u, v, key)`, where `key` is the key of the edge. When the\n graph is directed, then `u` and `v` are always in the order of the\n actual directed edge. If orientation is \'ignore\', then an edge takes\n the form `(u, v, key, direction)` where direction indicates if the edge\n was followed in the forward (tail to head) or reverse (head to tail)\n direction. When the direction is forward, the value of `direction`\n is \'forward\'. 
When the direction is reverse, the value of `direction`\n is \'reverse\'.\n\n Raises\n ------\n NetworkXNoCycle\n If no cycle was found.\n\n Examples\n --------\n In this example, we construct a DAG and find, in the first call, that there\n are no directed cycles, and so an exception is raised. In the second call,\n we ignore edge orientations and find that there is an undirected cycle.\n Note that the second call finds a directed cycle while effectively\n traversing an undirected graph, and so, we found an "undirected cycle".\n This means that this DAG structure does not form a directed tree (which\n is also known as a polytree).\n\n >>> import networkx as nx\n >>> G = nx.DiGraph([(0, 1), (0, 2), (1, 2)])\n >>> try:\n ... nx.find_cycle(G, orientation=\'original\')\n ... except:\n ... pass\n ...\n >>> list(nx.find_cycle(G, orientation=\'ignore\'))\n [(0, 1, \'forward\'), (1, 2, \'forward\'), (0, 2, \'reverse\')]\n\n ' (out_edge, key, tailhead) = helper_funcs(G, orientation) explored = set() cycle = [] final_node = None for start_node in G.nbunch_iter(source): if (start_node in explored): continue edges = [] seen = {start_node} active_nodes = {start_node} previous_head = None for edge in edge_dfs(G, start_node, orientation): (tail, head) = tailhead(edge) if (head in explored): continue if ((previous_head is not None) and (tail != previous_head)): while True: try: popped_edge = edges.pop() except IndexError: edges = [] active_nodes = {tail} break else: popped_head = tailhead(popped_edge)[1] active_nodes.remove(popped_head) if edges: last_head = tailhead(edges[(- 1)])[1] if (tail == last_head): break edges.append(edge) if (head in active_nodes): cycle.extend(edges) final_node = head break else: seen.add(head) active_nodes.add(head) previous_head = head if cycle: break else: explored.update(seen) else: assert (len(cycle) == 0) raise nx.exception.NetworkXNoCycle('No cycle found.') for (i, edge) in enumerate(cycle): (tail, head) = tailhead(edge) if (tail == final_node): 
break return cycle[i:]
Returns the edges of a cycle found via a directed, depth-first traversal. Parameters ---------- G : graph A directed/undirected graph/multigraph. source : node, list of nodes The node from which the traversal begins. If None, then a source is chosen arbitrarily and repeatedly until all edges from each node in the graph are searched. orientation : 'original' | 'reverse' | 'ignore' For directed graphs and directed multigraphs, edge traversals need not respect the original orientation of the edges. When set to 'reverse', then every edge will be traversed in the reverse direction. When set to 'ignore', then each directed edge is treated as a single undirected edge that can be traversed in either direction. For undirected graphs and undirected multigraphs, this parameter is meaningless and is not consulted by the algorithm. Returns ------- edges : directed edges A list of directed edges indicating the path taken for the loop. If no cycle is found, then an exception is raised. For graphs, an edge is of the form `(u, v)` where `u` and `v` are the tail and head of the edge as determined by the traversal. For multigraphs, an edge is of the form `(u, v, key)`, where `key` is the key of the edge. When the graph is directed, then `u` and `v` are always in the order of the actual directed edge. If orientation is 'ignore', then an edge takes the form `(u, v, key, direction)` where direction indicates if the edge was followed in the forward (tail to head) or reverse (head to tail) direction. When the direction is forward, the value of `direction` is 'forward'. When the direction is reverse, the value of `direction` is 'reverse'. Raises ------ NetworkXNoCycle If no cycle was found. Examples -------- In this example, we construct a DAG and find, in the first call, that there are no directed cycles, and so an exception is raised. In the second call, we ignore edge orientations and find that there is an undirected cycle. 
Note that the second call finds a directed cycle while effectively traversing an undirected graph, and so, we found an "undirected cycle". This means that this DAG structure does not form a directed tree (which is also known as a polytree). >>> import networkx as nx >>> G = nx.DiGraph([(0, 1), (0, 2), (1, 2)]) >>> try: ... nx.find_cycle(G, orientation='original') ... except: ... pass ... >>> list(nx.find_cycle(G, orientation='ignore')) [(0, 1, 'forward'), (1, 2, 'forward'), (0, 2, 'reverse')]
src/networkx/algorithms/cycles.py
find_cycle
MarletteFunding/aws-kube-codesuite
184
python
def find_cycle(G, source=None, orientation='original'): '\n Returns the edges of a cycle found via a directed, depth-first traversal.\n\n Parameters\n ----------\n G : graph\n A directed/undirected graph/multigraph.\n\n source : node, list of nodes\n The node from which the traversal begins. If None, then a source\n is chosen arbitrarily and repeatedly until all edges from each node in\n the graph are searched.\n\n orientation : \'original\' | \'reverse\' | \'ignore\'\n For directed graphs and directed multigraphs, edge traversals need not\n respect the original orientation of the edges. When set to \'reverse\',\n then every edge will be traversed in the reverse direction. When set to\n \'ignore\', then each directed edge is treated as a single undirected\n edge that can be traversed in either direction. For undirected graphs\n and undirected multigraphs, this parameter is meaningless and is not\n consulted by the algorithm.\n\n Returns\n -------\n edges : directed edges\n A list of directed edges indicating the path taken for the loop. If\n no cycle is found, then an exception is raised. For graphs, an\n edge is of the form `(u, v)` where `u` and `v` are the tail and head\n of the edge as determined by the traversal. For multigraphs, an edge is\n of the form `(u, v, key)`, where `key` is the key of the edge. When the\n graph is directed, then `u` and `v` are always in the order of the\n actual directed edge. If orientation is \'ignore\', then an edge takes\n the form `(u, v, key, direction)` where direction indicates if the edge\n was followed in the forward (tail to head) or reverse (head to tail)\n direction. When the direction is forward, the value of `direction`\n is \'forward\'. 
When the direction is reverse, the value of `direction`\n is \'reverse\'.\n\n Raises\n ------\n NetworkXNoCycle\n If no cycle was found.\n\n Examples\n --------\n In this example, we construct a DAG and find, in the first call, that there\n are no directed cycles, and so an exception is raised. In the second call,\n we ignore edge orientations and find that there is an undirected cycle.\n Note that the second call finds a directed cycle while effectively\n traversing an undirected graph, and so, we found an "undirected cycle".\n This means that this DAG structure does not form a directed tree (which\n is also known as a polytree).\n\n >>> import networkx as nx\n >>> G = nx.DiGraph([(0, 1), (0, 2), (1, 2)])\n >>> try:\n ... nx.find_cycle(G, orientation=\'original\')\n ... except:\n ... pass\n ...\n >>> list(nx.find_cycle(G, orientation=\'ignore\'))\n [(0, 1, \'forward\'), (1, 2, \'forward\'), (0, 2, \'reverse\')]\n\n ' (out_edge, key, tailhead) = helper_funcs(G, orientation) explored = set() cycle = [] final_node = None for start_node in G.nbunch_iter(source): if (start_node in explored): continue edges = [] seen = {start_node} active_nodes = {start_node} previous_head = None for edge in edge_dfs(G, start_node, orientation): (tail, head) = tailhead(edge) if (head in explored): continue if ((previous_head is not None) and (tail != previous_head)): while True: try: popped_edge = edges.pop() except IndexError: edges = [] active_nodes = {tail} break else: popped_head = tailhead(popped_edge)[1] active_nodes.remove(popped_head) if edges: last_head = tailhead(edges[(- 1)])[1] if (tail == last_head): break edges.append(edge) if (head in active_nodes): cycle.extend(edges) final_node = head break else: seen.add(head) active_nodes.add(head) previous_head = head if cycle: break else: explored.update(seen) else: assert (len(cycle) == 0) raise nx.exception.NetworkXNoCycle('No cycle found.') for (i, edge) in enumerate(cycle): (tail, head) = tailhead(edge) if (tail == final_node): 
break return cycle[i:]
def find_cycle(G, source=None, orientation='original'): '\n Returns the edges of a cycle found via a directed, depth-first traversal.\n\n Parameters\n ----------\n G : graph\n A directed/undirected graph/multigraph.\n\n source : node, list of nodes\n The node from which the traversal begins. If None, then a source\n is chosen arbitrarily and repeatedly until all edges from each node in\n the graph are searched.\n\n orientation : \'original\' | \'reverse\' | \'ignore\'\n For directed graphs and directed multigraphs, edge traversals need not\n respect the original orientation of the edges. When set to \'reverse\',\n then every edge will be traversed in the reverse direction. When set to\n \'ignore\', then each directed edge is treated as a single undirected\n edge that can be traversed in either direction. For undirected graphs\n and undirected multigraphs, this parameter is meaningless and is not\n consulted by the algorithm.\n\n Returns\n -------\n edges : directed edges\n A list of directed edges indicating the path taken for the loop. If\n no cycle is found, then an exception is raised. For graphs, an\n edge is of the form `(u, v)` where `u` and `v` are the tail and head\n of the edge as determined by the traversal. For multigraphs, an edge is\n of the form `(u, v, key)`, where `key` is the key of the edge. When the\n graph is directed, then `u` and `v` are always in the order of the\n actual directed edge. If orientation is \'ignore\', then an edge takes\n the form `(u, v, key, direction)` where direction indicates if the edge\n was followed in the forward (tail to head) or reverse (head to tail)\n direction. When the direction is forward, the value of `direction`\n is \'forward\'. 
When the direction is reverse, the value of `direction`\n is \'reverse\'.\n\n Raises\n ------\n NetworkXNoCycle\n If no cycle was found.\n\n Examples\n --------\n In this example, we construct a DAG and find, in the first call, that there\n are no directed cycles, and so an exception is raised. In the second call,\n we ignore edge orientations and find that there is an undirected cycle.\n Note that the second call finds a directed cycle while effectively\n traversing an undirected graph, and so, we found an "undirected cycle".\n This means that this DAG structure does not form a directed tree (which\n is also known as a polytree).\n\n >>> import networkx as nx\n >>> G = nx.DiGraph([(0, 1), (0, 2), (1, 2)])\n >>> try:\n ... nx.find_cycle(G, orientation=\'original\')\n ... except:\n ... pass\n ...\n >>> list(nx.find_cycle(G, orientation=\'ignore\'))\n [(0, 1, \'forward\'), (1, 2, \'forward\'), (0, 2, \'reverse\')]\n\n ' (out_edge, key, tailhead) = helper_funcs(G, orientation) explored = set() cycle = [] final_node = None for start_node in G.nbunch_iter(source): if (start_node in explored): continue edges = [] seen = {start_node} active_nodes = {start_node} previous_head = None for edge in edge_dfs(G, start_node, orientation): (tail, head) = tailhead(edge) if (head in explored): continue if ((previous_head is not None) and (tail != previous_head)): while True: try: popped_edge = edges.pop() except IndexError: edges = [] active_nodes = {tail} break else: popped_head = tailhead(popped_edge)[1] active_nodes.remove(popped_head) if edges: last_head = tailhead(edges[(- 1)])[1] if (tail == last_head): break edges.append(edge) if (head in active_nodes): cycle.extend(edges) final_node = head break else: seen.add(head) active_nodes.add(head) previous_head = head if cycle: break else: explored.update(seen) else: assert (len(cycle) == 0) raise nx.exception.NetworkXNoCycle('No cycle found.') for (i, edge) in enumerate(cycle): (tail, head) = tailhead(edge) if (tail == final_node): 
break return cycle[i:]<|docstring|>Returns the edges of a cycle found via a directed, depth-first traversal. Parameters ---------- G : graph A directed/undirected graph/multigraph. source : node, list of nodes The node from which the traversal begins. If None, then a source is chosen arbitrarily and repeatedly until all edges from each node in the graph are searched. orientation : 'original' | 'reverse' | 'ignore' For directed graphs and directed multigraphs, edge traversals need not respect the original orientation of the edges. When set to 'reverse', then every edge will be traversed in the reverse direction. When set to 'ignore', then each directed edge is treated as a single undirected edge that can be traversed in either direction. For undirected graphs and undirected multigraphs, this parameter is meaningless and is not consulted by the algorithm. Returns ------- edges : directed edges A list of directed edges indicating the path taken for the loop. If no cycle is found, then an exception is raised. For graphs, an edge is of the form `(u, v)` where `u` and `v` are the tail and head of the edge as determined by the traversal. For multigraphs, an edge is of the form `(u, v, key)`, where `key` is the key of the edge. When the graph is directed, then `u` and `v` are always in the order of the actual directed edge. If orientation is 'ignore', then an edge takes the form `(u, v, key, direction)` where direction indicates if the edge was followed in the forward (tail to head) or reverse (head to tail) direction. When the direction is forward, the value of `direction` is 'forward'. When the direction is reverse, the value of `direction` is 'reverse'. Raises ------ NetworkXNoCycle If no cycle was found. Examples -------- In this example, we construct a DAG and find, in the first call, that there are no directed cycles, and so an exception is raised. In the second call, we ignore edge orientations and find that there is an undirected cycle. 
Note that the second call finds a directed cycle while effectively traversing an undirected graph, and so, we found an "undirected cycle". This means that this DAG structure does not form a directed tree (which is also known as a polytree). >>> import networkx as nx >>> G = nx.DiGraph([(0, 1), (0, 2), (1, 2)]) >>> try: ... nx.find_cycle(G, orientation='original') ... except: ... pass ... >>> list(nx.find_cycle(G, orientation='ignore')) [(0, 1, 'forward'), (1, 2, 'forward'), (0, 2, 'reverse')]<|endoftext|>
1727301a2e4f087a89b6e6a6208d465d6c746c27a380fa987484ab8228ac556c
def _unblock(thisnode): 'Recursively unblock and remove nodes from B[thisnode].' if blocked[thisnode]: blocked[thisnode] = False while B[thisnode]: _unblock(B[thisnode].pop())
Recursively unblock and remove nodes from B[thisnode].
src/networkx/algorithms/cycles.py
_unblock
MarletteFunding/aws-kube-codesuite
184
python
def _unblock(thisnode): if blocked[thisnode]: blocked[thisnode] = False while B[thisnode]: _unblock(B[thisnode].pop())
def _unblock(thisnode): if blocked[thisnode]: blocked[thisnode] = False while B[thisnode]: _unblock(B[thisnode].pop())<|docstring|>Recursively unblock and remove nodes from B[thisnode].<|endoftext|>
e77ac0fcaed2689a02cc873049ef5cb6c6dbea382c52a5666062e8d757109867
def add_attributes(self, **attribute_names): '\n defines the attributes that each record can have. If a database was previously defined and\n new attributes are added the database records are updated.\n :param attribute_names: The attribute names\n :param d: The dictionary/table name\n ' existing_attribute_names = list(map((lambda x: x[0]), self.con.execute('SELECT * from {}'.format(self.table_name)).description)) list_new_attributes = (set(existing_attribute_names).union(attribute_names.keys()) - set(existing_attribute_names)) for col in list_new_attributes: data = {'name': self.table_name, 'col': col, 'attribute': attribute_names[col]} self.cur.execute('ALTER TABLE {name} ADD COLUMN {col} {attribute}'.format(**data))
defines the attributes that each record can have. If a database was previously defined and new attributes are added the database records are updated. :param attribute_names: The attribute names :param d: The dictionary/table name
deprecated/dev/erika/PersistentDb.py
add_attributes
cloudmesh/client-deprecated
0
python
def add_attributes(self, **attribute_names): '\n defines the attributes that each record can have. If a database was previously defined and\n new attributes are added the database records are updated.\n :param attribute_names: The attribute names\n :param d: The dictionary/table name\n ' existing_attribute_names = list(map((lambda x: x[0]), self.con.execute('SELECT * from {}'.format(self.table_name)).description)) list_new_attributes = (set(existing_attribute_names).union(attribute_names.keys()) - set(existing_attribute_names)) for col in list_new_attributes: data = {'name': self.table_name, 'col': col, 'attribute': attribute_names[col]} self.cur.execute('ALTER TABLE {name} ADD COLUMN {col} {attribute}'.format(**data))
def add_attributes(self, **attribute_names): '\n defines the attributes that each record can have. If a database was previously defined and\n new attributes are added the database records are updated.\n :param attribute_names: The attribute names\n :param d: The dictionary/table name\n ' existing_attribute_names = list(map((lambda x: x[0]), self.con.execute('SELECT * from {}'.format(self.table_name)).description)) list_new_attributes = (set(existing_attribute_names).union(attribute_names.keys()) - set(existing_attribute_names)) for col in list_new_attributes: data = {'name': self.table_name, 'col': col, 'attribute': attribute_names[col]} self.cur.execute('ALTER TABLE {name} ADD COLUMN {col} {attribute}'.format(**data))<|docstring|>defines the attributes that each record can have. If a database was previously defined and new attributes are added the database records are updated. :param attribute_names: The attribute names :param d: The dictionary/table name<|endoftext|>
3eb5046f212ccb73505c0f6d996b79b03664625f3b3eeca360a11e98b64a058b
def add_attribute(self, name, data_type): '\n Alters the table with an additional column\n :param name: Name of the attribute\n :param data_type: Type of the attribute\n ' data = {'table': self.table_name, 'name': name, 'type': data_type} self.cur.execute('ALTER TABLE {table} ADD COLUMN {name} {type}'.format(**data))
Alters the table with an additional column :param name: Name of the attribute :param data_type: Type of the attribute
deprecated/dev/erika/PersistentDb.py
add_attribute
cloudmesh/client-deprecated
0
python
def add_attribute(self, name, data_type): '\n Alters the table with an additional column\n :param name: Name of the attribute\n :param data_type: Type of the attribute\n ' data = {'table': self.table_name, 'name': name, 'type': data_type} self.cur.execute('ALTER TABLE {table} ADD COLUMN {name} {type}'.format(**data))
def add_attribute(self, name, data_type): '\n Alters the table with an additional column\n :param name: Name of the attribute\n :param data_type: Type of the attribute\n ' data = {'table': self.table_name, 'name': name, 'type': data_type} self.cur.execute('ALTER TABLE {table} ADD COLUMN {name} {type}'.format(**data))<|docstring|>Alters the table with an additional column :param name: Name of the attribute :param data_type: Type of the attribute<|endoftext|>
244b6c86e9139fd879e48ad94618308268cbadd2d95f1895719af58067f2d164
def get_attribute_type(self, name): '\n This returns the data type of the attribute\n :param name: Name of the attribute\n :return: The type of the attribute\n ' result = self.cur.execute('PRAGMA table_info({})'.format(self.table_name)).fetchall() for res in result: if (res[1] == name): return res[2]
This returns the data type of the attribute :param name: Name of the attribute :return: The type of the attribute
deprecated/dev/erika/PersistentDb.py
get_attribute_type
cloudmesh/client-deprecated
0
python
def get_attribute_type(self, name): '\n This returns the data type of the attribute\n :param name: Name of the attribute\n :return: The type of the attribute\n ' result = self.cur.execute('PRAGMA table_info({})'.format(self.table_name)).fetchall() for res in result: if (res[1] == name): return res[2]
def get_attribute_type(self, name): '\n This returns the data type of the attribute\n :param name: Name of the attribute\n :return: The type of the attribute\n ' result = self.cur.execute('PRAGMA table_info({})'.format(self.table_name)).fetchall() for res in result: if (res[1] == name): return res[2]<|docstring|>This returns the data type of the attribute :param name: Name of the attribute :return: The type of the attribute<|endoftext|>
daf68389fb0b4fb558d568d68fc80d2e27114204d70912bf9fa07d53d646aac8
def add(self, **kwargs): '\n adds the dict to the database. The attribute "name" is used to define a\n unique name for the object in the database\n :param record: the dict that at minimum must contain a name attribute\n ' column_names = ', '.join(kwargs.keys()) placeholders = ', '.join(('?' * len(kwargs.values()))) sql = 'INSERT INTO {} ({}) VALUES ({})'.format(self.table_name, column_names, placeholders) self.cur.execute(sql, tuple(kwargs.values())) self.con.commit()
adds the dict to the database. The attribute "name" is used to define a unique name for the object in the database :param record: the dict that at minimum must contain a name attribute
deprecated/dev/erika/PersistentDb.py
add
cloudmesh/client-deprecated
0
python
def add(self, **kwargs): '\n adds the dict to the database. The attribute "name" is used to define a\n unique name for the object in the database\n :param record: the dict that at minimum must contain a name attribute\n ' column_names = ', '.join(kwargs.keys()) placeholders = ', '.join(('?' * len(kwargs.values()))) sql = 'INSERT INTO {} ({}) VALUES ({})'.format(self.table_name, column_names, placeholders) self.cur.execute(sql, tuple(kwargs.values())) self.con.commit()
def add(self, **kwargs): '\n adds the dict to the database. The attribute "name" is used to define a\n unique name for the object in the database\n :param record: the dict that at minimum must contain a name attribute\n ' column_names = ', '.join(kwargs.keys()) placeholders = ', '.join(('?' * len(kwargs.values()))) sql = 'INSERT INTO {} ({}) VALUES ({})'.format(self.table_name, column_names, placeholders) self.cur.execute(sql, tuple(kwargs.values())) self.con.commit()<|docstring|>adds the dict to the database. The attribute "name" is used to define a unique name for the object in the database :param record: the dict that at minimum must contain a name attribute<|endoftext|>
c8daf83bacfad9791642873a49eab130788f711f278db987e59ec5549933edbc
def delete(self, operator, **kwargs): '\n deletes all elements in that match the query formulated by kwargs and the operator\n operators that are allowed are = and, or\n :param kwargs: the attributes that we look for\n :param operator: the operator and / or\n ' data = {'table': self.table_name, 'key_values': ' {} '.format(operator).join((("%s='%s'" % (key, value)) for (key, value) in kwargs.items()))} self.cur.execute('DELETE FROM {table} WHERE {key_values} '.format(**data)) self.con.commit()
deletes all elements in that match the query formulated by kwargs and the operator operators that are allowed are = and, or :param kwargs: the attributes that we look for :param operator: the operator and / or
deprecated/dev/erika/PersistentDb.py
delete
cloudmesh/client-deprecated
0
python
def delete(self, operator, **kwargs): '\n deletes all elements in that match the query formulated by kwargs and the operator\n operators that are allowed are = and, or\n :param kwargs: the attributes that we look for\n :param operator: the operator and / or\n ' data = {'table': self.table_name, 'key_values': ' {} '.format(operator).join((("%s='%s'" % (key, value)) for (key, value) in kwargs.items()))} self.cur.execute('DELETE FROM {table} WHERE {key_values} '.format(**data)) self.con.commit()
def delete(self, operator, **kwargs): '\n deletes all elements in that match the query formulated by kwargs and the operator\n operators that are allowed are = and, or\n :param kwargs: the attributes that we look for\n :param operator: the operator and / or\n ' data = {'table': self.table_name, 'key_values': ' {} '.format(operator).join((("%s='%s'" % (key, value)) for (key, value) in kwargs.items()))} self.cur.execute('DELETE FROM {table} WHERE {key_values} '.format(**data)) self.con.commit()<|docstring|>deletes all elements in that match the query formulated by kwargs and the operator operators that are allowed are = and, or :param kwargs: the attributes that we look for :param operator: the operator and / or<|endoftext|>
99d3cdb95da027a6dd1cbc2e29342757ea45745ad90933c8f778ebe07a6e646c
def find(self, operator, **kwargs): '\n Finds all the elements that match the query formulated by kwargs and the operator.\n :param operator: The operators and / or\n :param kwargs: The attributes that we look for\n ' data = {'table': self.table_name, 'key_values': ' {} '.format(operator).join((("%s='%s'" % (key, value)) for (key, value) in kwargs.items()))} result = self.cur.execute('SELECT * FROM {table} WHERE {key_values}'.format(**data)) recs_list = result.fetchall() print(recs_list)
Finds all the elements that match the query formulated by kwargs and the operator. :param operator: The operators and / or :param kwargs: The attributes that we look for
deprecated/dev/erika/PersistentDb.py
find
cloudmesh/client-deprecated
0
python
def find(self, operator, **kwargs): '\n Finds all the elements that match the query formulated by kwargs and the operator.\n :param operator: The operators and / or\n :param kwargs: The attributes that we look for\n ' data = {'table': self.table_name, 'key_values': ' {} '.format(operator).join((("%s='%s'" % (key, value)) for (key, value) in kwargs.items()))} result = self.cur.execute('SELECT * FROM {table} WHERE {key_values}'.format(**data)) recs_list = result.fetchall() print(recs_list)
def find(self, operator, **kwargs): '\n Finds all the elements that match the query formulated by kwargs and the operator.\n :param operator: The operators and / or\n :param kwargs: The attributes that we look for\n ' data = {'table': self.table_name, 'key_values': ' {} '.format(operator).join((("%s='%s'" % (key, value)) for (key, value) in kwargs.items()))} result = self.cur.execute('SELECT * FROM {table} WHERE {key_values}'.format(**data)) recs_list = result.fetchall() print(recs_list)<|docstring|>Finds all the elements that match the query formulated by kwargs and the operator. :param operator: The operators and / or :param kwargs: The attributes that we look for<|endoftext|>
3d2a42991c6e099df2c641bc405abd0b5c974903a96c29d786731a738b8d0990
def get(self, operator, **kwargs): '\n Finds the first element that match the query formulated by kwargs and the operator\n :param operator: The operators and / or\n :param kwargs: The attributes that we look for\n ' data = {'table': self.table_name, 'key_values': ' {} '.format(operator).join((("%s='%s'" % (key, value)) for (key, value) in kwargs.items()))} result = self.cur.execute('SELECT * FROM {table} WHERE {key_values}'.format(**data)) rec = result.fetchone() print(rec)
Finds the first element that match the query formulated by kwargs and the operator :param operator: The operators and / or :param kwargs: The attributes that we look for
deprecated/dev/erika/PersistentDb.py
get
cloudmesh/client-deprecated
0
python
def get(self, operator, **kwargs): '\n Finds the first element that match the query formulated by kwargs and the operator\n :param operator: The operators and / or\n :param kwargs: The attributes that we look for\n ' data = {'table': self.table_name, 'key_values': ' {} '.format(operator).join((("%s='%s'" % (key, value)) for (key, value) in kwargs.items()))} result = self.cur.execute('SELECT * FROM {table} WHERE {key_values}'.format(**data)) rec = result.fetchone() print(rec)
def get(self, operator, **kwargs): '\n Finds the first element that match the query formulated by kwargs and the operator\n :param operator: The operators and / or\n :param kwargs: The attributes that we look for\n ' data = {'table': self.table_name, 'key_values': ' {} '.format(operator).join((("%s='%s'" % (key, value)) for (key, value) in kwargs.items()))} result = self.cur.execute('SELECT * FROM {table} WHERE {key_values}'.format(**data)) rec = result.fetchone() print(rec)<|docstring|>Finds the first element that match the query formulated by kwargs and the operator :param operator: The operators and / or :param kwargs: The attributes that we look for<|endoftext|>
31d5c0ff1d61a6149df3e2c27579c05e69527393d680b4c6cb876d203e9f6a7a
@property def json(self): '\n :return: The json object of the database\n ' self.con.row_factory = sqlite.Row result = self.con.cursor().execute('SELECT * FROM {} '.format(self.table_name)).fetchall() return json.dumps([dict(row) for row in result])
:return: The json object of the database
deprecated/dev/erika/PersistentDb.py
json
cloudmesh/client-deprecated
0
python
@property def json(self): '\n \n ' self.con.row_factory = sqlite.Row result = self.con.cursor().execute('SELECT * FROM {} '.format(self.table_name)).fetchall() return json.dumps([dict(row) for row in result])
@property def json(self): '\n \n ' self.con.row_factory = sqlite.Row result = self.con.cursor().execute('SELECT * FROM {} '.format(self.table_name)).fetchall() return json.dumps([dict(row) for row in result])<|docstring|>:return: The json object of the database<|endoftext|>
94e913b6d3a0c03d86cb31b1d480bdaaa0b23d194c38fe7100cdbc21ba4f6a60
def backup(self, file_name): '\n backs up the current database. If the filename is omitted, the backup will be\n created in the same directory as the database with the postfix .bak.#\n where # is the largest number that has not yet been assigned. For example.,\n lets assume a backup exists with the name file.bak.9, than the next higher\n number is used (e.g. 10) and the backup file file.back.10 is used.\n :param file_name: the backup filename\n ' backup_file_no = (len([name for name in os.listdir('.') if (os.path.isfile(name) and (file_name in name))]) + 1) backup_file = open(((file_name + '.bak.') + str(backup_file_no)), 'w') backup_file.write(self.json) backup_file.close()
backs up the current database. If the filename is omitted, the backup will be created in the same directory as the database with the postfix .bak.# where # is the largest number that has not yet been assigned. For example., lets assume a backup exists with the name file.bak.9, than the next higher number is used (e.g. 10) and the backup file file.back.10 is used. :param file_name: the backup filename
deprecated/dev/erika/PersistentDb.py
backup
cloudmesh/client-deprecated
0
python
def backup(self, file_name): '\n backs up the current database. If the filename is omitted, the backup will be\n created in the same directory as the database with the postfix .bak.#\n where # is the largest number that has not yet been assigned. For example.,\n lets assume a backup exists with the name file.bak.9, than the next higher\n number is used (e.g. 10) and the backup file file.back.10 is used.\n :param file_name: the backup filename\n ' backup_file_no = (len([name for name in os.listdir('.') if (os.path.isfile(name) and (file_name in name))]) + 1) backup_file = open(((file_name + '.bak.') + str(backup_file_no)), 'w') backup_file.write(self.json) backup_file.close()
def backup(self, file_name): '\n backs up the current database. If the filename is omitted, the backup will be\n created in the same directory as the database with the postfix .bak.#\n where # is the largest number that has not yet been assigned. For example.,\n lets assume a backup exists with the name file.bak.9, than the next higher\n number is used (e.g. 10) and the backup file file.back.10 is used.\n :param file_name: the backup filename\n ' backup_file_no = (len([name for name in os.listdir('.') if (os.path.isfile(name) and (file_name in name))]) + 1) backup_file = open(((file_name + '.bak.') + str(backup_file_no)), 'w') backup_file.write(self.json) backup_file.close()<|docstring|>backs up the current database. If the filename is omitted, the backup will be created in the same directory as the database with the postfix .bak.# where # is the largest number that has not yet been assigned. For example., lets assume a backup exists with the name file.bak.9, than the next higher number is used (e.g. 10) and the backup file file.back.10 is used. :param file_name: the backup filename<|endoftext|>
42b89eca09679a5efe0dbcc52984324c85461eac85123d47a4e324d4549474e8
def AlistipesPutredinis(directed: bool=False, verbose: int=2, cache_path: str='graphs/string', **additional_graph_kwargs: Dict) -> EnsmallenGraph: 'Return new instance of the Alistipes putredinis graph.\n\n The graph is automatically retrieved from the STRING repository. \n\n\t\n\n Parameters\n -------------------\n directed: bool = False,\n Wether to load the graph as directed or undirected.\n By default false.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache_path: str = "graphs",\n Where to store the downloaded graphs.\n additional_graph_kwargs: Dict,\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of Alistipes putredinis graph.\n\n\tReport\n\t---------------------\n\tAt the time of rendering these methods (please see datetime below), the graph\n\thad the following characteristics:\n\t\n\tDatetime: 2021-02-02 20:50:59.037813\n\t\n\tThe undirected graph Alistipes putredinis has 2704 nodes and 118704 weighted\n\tedges, of which none are self-loops. The graph is dense as it has a density\n\tof 0.03248 and has 33 connected components, where the component with most\n\tnodes has 2615 nodes and the component with the least nodes has 2 nodes.\n\tThe graph median node degree is 61, the mean node degree is 87.80, and\n\tthe node degree mode is 3. 
The top 5 most central nodes are 445970.ALIPUT_02098\n\t(degree 830), 445970.ALIPUT_01698 (degree 713), 445970.ALIPUT_01701 (degree\n\t655), 445970.ALIPUT_00077 (degree 624) and 445970.ALIPUT_02006 (degree\n\t577).\n\t\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t\n\n\tUsage example\n\t----------------------\n\tThe usage of this graph is relatively straightforward:\n\t\n\t.. 
code:: python\n\t\n\t # First import the function to retrieve the graph from the datasets\n\t from ensmallen_graph.datasets.string import AlistipesPutredinis\n\t\n\t # Then load the graph\n\t graph = AlistipesPutredinis()\n\t\n\t # Finally, you can do anything with it, for instance, compute its report:\n\t print(graph)\n\t\n\t # If you need to run a link prediction task with validation,\n\t # you can split the graph using a connected holdout as follows:\n\t train_graph, validation_graph = graph.connected_holdout(\n\t # You can use an 80/20 split the holdout, for example.\n\t train_size=0.8,\n\t # The random state is used to reproduce the holdout.\n\t random_state=42,\n\t # Wether to show a loading bar.\n\t verbose=True\n\t )\n\t\n\t # Remember that, if you need, you can enable the memory-time trade-offs:\n\t train_graph.enable(\n\t vector_sources=True,\n\t vector_destinations=True,\n\t vector_outbounds=True\n\t )\n\t\n\t # Consider using the methods made available in the Embiggen package\n\t # to run graph embedding or link prediction tasks.\n ' return AutomaticallyRetrievedGraph(graph_name='AlistipesPutredinis', dataset='string', directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()
Return new instance of the Alistipes putredinis graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False, Wether to load the graph as directed or undirected. By default false. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache_path: str = "graphs", Where to store the downloaded graphs. additional_graph_kwargs: Dict, Additional graph kwargs. Returns ----------------------- Instace of Alistipes putredinis graph. Report --------------------- At the time of rendering these methods (please see datetime below), the graph had the following characteristics: Datetime: 2021-02-02 20:50:59.037813 The undirected graph Alistipes putredinis has 2704 nodes and 118704 weighted edges, of which none are self-loops. The graph is dense as it has a density of 0.03248 and has 33 connected components, where the component with most nodes has 2615 nodes and the component with the least nodes has 2 nodes. The graph median node degree is 61, the mean node degree is 87.80, and the node degree mode is 3. The top 5 most central nodes are 445970.ALIPUT_02098 (degree 830), 445970.ALIPUT_01698 (degree 713), 445970.ALIPUT_01701 (degree 655), 445970.ALIPUT_00077 (degree 624) and 445970.ALIPUT_02006 (degree 577). 
References --------------------- Please cite the following if you use the data: @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } Usage example ---------------------- The usage of this graph is relatively straightforward: .. code:: python # First import the function to retrieve the graph from the datasets from ensmallen_graph.datasets.string import AlistipesPutredinis # Then load the graph graph = AlistipesPutredinis() # Finally, you can do anything with it, for instance, compute its report: print(graph) # If you need to run a link prediction task with validation, # you can split the graph using a connected holdout as follows: train_graph, validation_graph = graph.connected_holdout( # You can use an 80/20 split the holdout, for example. train_size=0.8, # The random state is used to reproduce the holdout. random_state=42, # Wether to show a loading bar. verbose=True ) # Remember that, if you need, you can enable the memory-time trade-offs: train_graph.enable( vector_sources=True, vector_destinations=True, vector_outbounds=True ) # Consider using the methods made available in the Embiggen package # to run graph embedding or link prediction tasks.
bindings/python/ensmallen_graph/datasets/string/alistipesputredinis.py
AlistipesPutredinis
caufieldjh/ensmallen_graph
0
python
def AlistipesPutredinis(directed: bool=False, verbose: int=2, cache_path: str='graphs/string', **additional_graph_kwargs: Dict) -> EnsmallenGraph: 'Return new instance of the Alistipes putredinis graph.\n\n The graph is automatically retrieved from the STRING repository. \n\n\t\n\n Parameters\n -------------------\n directed: bool = False,\n Wether to load the graph as directed or undirected.\n By default false.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache_path: str = "graphs",\n Where to store the downloaded graphs.\n additional_graph_kwargs: Dict,\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of Alistipes putredinis graph.\n\n\tReport\n\t---------------------\n\tAt the time of rendering these methods (please see datetime below), the graph\n\thad the following characteristics:\n\t\n\tDatetime: 2021-02-02 20:50:59.037813\n\t\n\tThe undirected graph Alistipes putredinis has 2704 nodes and 118704 weighted\n\tedges, of which none are self-loops. The graph is dense as it has a density\n\tof 0.03248 and has 33 connected components, where the component with most\n\tnodes has 2615 nodes and the component with the least nodes has 2 nodes.\n\tThe graph median node degree is 61, the mean node degree is 87.80, and\n\tthe node degree mode is 3. 
The top 5 most central nodes are 445970.ALIPUT_02098\n\t(degree 830), 445970.ALIPUT_01698 (degree 713), 445970.ALIPUT_01701 (degree\n\t655), 445970.ALIPUT_00077 (degree 624) and 445970.ALIPUT_02006 (degree\n\t577).\n\t\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t\n\n\tUsage example\n\t----------------------\n\tThe usage of this graph is relatively straightforward:\n\t\n\t.. 
code:: python\n\t\n\t # First import the function to retrieve the graph from the datasets\n\t from ensmallen_graph.datasets.string import AlistipesPutredinis\n\t\n\t # Then load the graph\n\t graph = AlistipesPutredinis()\n\t\n\t # Finally, you can do anything with it, for instance, compute its report:\n\t print(graph)\n\t\n\t # If you need to run a link prediction task with validation,\n\t # you can split the graph using a connected holdout as follows:\n\t train_graph, validation_graph = graph.connected_holdout(\n\t # You can use an 80/20 split the holdout, for example.\n\t train_size=0.8,\n\t # The random state is used to reproduce the holdout.\n\t random_state=42,\n\t # Wether to show a loading bar.\n\t verbose=True\n\t )\n\t\n\t # Remember that, if you need, you can enable the memory-time trade-offs:\n\t train_graph.enable(\n\t vector_sources=True,\n\t vector_destinations=True,\n\t vector_outbounds=True\n\t )\n\t\n\t # Consider using the methods made available in the Embiggen package\n\t # to run graph embedding or link prediction tasks.\n ' return AutomaticallyRetrievedGraph(graph_name='AlistipesPutredinis', dataset='string', directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()
def AlistipesPutredinis(directed: bool=False, verbose: int=2, cache_path: str='graphs/string', **additional_graph_kwargs: Dict) -> EnsmallenGraph: 'Return new instance of the Alistipes putredinis graph.\n\n The graph is automatically retrieved from the STRING repository. \n\n\t\n\n Parameters\n -------------------\n directed: bool = False,\n Wether to load the graph as directed or undirected.\n By default false.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache_path: str = "graphs",\n Where to store the downloaded graphs.\n additional_graph_kwargs: Dict,\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of Alistipes putredinis graph.\n\n\tReport\n\t---------------------\n\tAt the time of rendering these methods (please see datetime below), the graph\n\thad the following characteristics:\n\t\n\tDatetime: 2021-02-02 20:50:59.037813\n\t\n\tThe undirected graph Alistipes putredinis has 2704 nodes and 118704 weighted\n\tedges, of which none are self-loops. The graph is dense as it has a density\n\tof 0.03248 and has 33 connected components, where the component with most\n\tnodes has 2615 nodes and the component with the least nodes has 2 nodes.\n\tThe graph median node degree is 61, the mean node degree is 87.80, and\n\tthe node degree mode is 3. 
The top 5 most central nodes are 445970.ALIPUT_02098\n\t(degree 830), 445970.ALIPUT_01698 (degree 713), 445970.ALIPUT_01701 (degree\n\t655), 445970.ALIPUT_00077 (degree 624) and 445970.ALIPUT_02006 (degree\n\t577).\n\t\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t\n\n\tUsage example\n\t----------------------\n\tThe usage of this graph is relatively straightforward:\n\t\n\t.. 
code:: python\n\t\n\t # First import the function to retrieve the graph from the datasets\n\t from ensmallen_graph.datasets.string import AlistipesPutredinis\n\t\n\t # Then load the graph\n\t graph = AlistipesPutredinis()\n\t\n\t # Finally, you can do anything with it, for instance, compute its report:\n\t print(graph)\n\t\n\t # If you need to run a link prediction task with validation,\n\t # you can split the graph using a connected holdout as follows:\n\t train_graph, validation_graph = graph.connected_holdout(\n\t # You can use an 80/20 split the holdout, for example.\n\t train_size=0.8,\n\t # The random state is used to reproduce the holdout.\n\t random_state=42,\n\t # Wether to show a loading bar.\n\t verbose=True\n\t )\n\t\n\t # Remember that, if you need, you can enable the memory-time trade-offs:\n\t train_graph.enable(\n\t vector_sources=True,\n\t vector_destinations=True,\n\t vector_outbounds=True\n\t )\n\t\n\t # Consider using the methods made available in the Embiggen package\n\t # to run graph embedding or link prediction tasks.\n ' return AutomaticallyRetrievedGraph(graph_name='AlistipesPutredinis', dataset='string', directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()<|docstring|>Return new instance of the Alistipes putredinis graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False, Wether to load the graph as directed or undirected. By default false. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache_path: str = "graphs", Where to store the downloaded graphs. additional_graph_kwargs: Dict, Additional graph kwargs. Returns ----------------------- Instace of Alistipes putredinis graph. 
Report --------------------- At the time of rendering these methods (please see datetime below), the graph had the following characteristics: Datetime: 2021-02-02 20:50:59.037813 The undirected graph Alistipes putredinis has 2704 nodes and 118704 weighted edges, of which none are self-loops. The graph is dense as it has a density of 0.03248 and has 33 connected components, where the component with most nodes has 2615 nodes and the component with the least nodes has 2 nodes. The graph median node degree is 61, the mean node degree is 87.80, and the node degree mode is 3. The top 5 most central nodes are 445970.ALIPUT_02098 (degree 830), 445970.ALIPUT_01698 (degree 713), 445970.ALIPUT_01701 (degree 655), 445970.ALIPUT_00077 (degree 624) and 445970.ALIPUT_02006 (degree 577). References --------------------- Please cite the following if you use the data: @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } Usage example ---------------------- The usage of this graph is relatively straightforward: .. 
code:: python # First import the function to retrieve the graph from the datasets from ensmallen_graph.datasets.string import AlistipesPutredinis # Then load the graph graph = AlistipesPutredinis() # Finally, you can do anything with it, for instance, compute its report: print(graph) # If you need to run a link prediction task with validation, # you can split the graph using a connected holdout as follows: train_graph, validation_graph = graph.connected_holdout( # You can use an 80/20 split the holdout, for example. train_size=0.8, # The random state is used to reproduce the holdout. random_state=42, # Wether to show a loading bar. verbose=True ) # Remember that, if you need, you can enable the memory-time trade-offs: train_graph.enable( vector_sources=True, vector_destinations=True, vector_outbounds=True ) # Consider using the methods made available in the Embiggen package # to run graph embedding or link prediction tasks.<|endoftext|>
c2d1ed24a699493fb3519ce0b5283ded6d2de0376e43317bcec39e6280c082b5
@classmethod def generate_live_csv_data(cls, today_entity_csv, entity, entries_in_interval): '\n Generates the live report using the `today_entity_csv` file received.\n ' with open(today_entity_csv, 'r') as log: objects_logs = {} lastest_entries = deque(csv.DictReader(log), entries_in_interval) for entry in lastest_entries: cls.procces_csv_row(entry, objects_logs) objects_logs_merged = {0: {'Occupancy': []}} for hour in objects_logs: objects_logs_merged[0]['Occupancy'].extend(objects_logs[hour]['Occupancy']) occupancy_live = cls.generate_hourly_metric_data(objects_logs_merged, entity)[0].tolist() occupancy_live.append(int(entity['occupancy_threshold'])) daily_violations = 0 entity_directory = entity['base_directory'] reports_directory = os.path.join(entity_directory, 'reports', cls.reports_folder) file_path = os.path.join(reports_directory, 'live.csv') if os.path.exists(file_path): with open(file_path, 'r') as live_file: lastest_entry = deque(csv.DictReader(live_file), 1)[0] if (datetime.strptime(lastest_entry['Time'], '%Y-%m-%d %H:%M:%S').date() == datetime.today().date()): daily_violations = int(lastest_entry['Violations']) if (occupancy_live[1] > occupancy_live[2]): daily_violations += 1 occupancy_live.append(daily_violations) return occupancy_live
Generates the live report using the `today_entity_csv` file received.
libs/metrics/occupancy.py
generate_live_csv_data
myunyui22/smart-social-distancing-dev
0
python
@classmethod def generate_live_csv_data(cls, today_entity_csv, entity, entries_in_interval): '\n \n ' with open(today_entity_csv, 'r') as log: objects_logs = {} lastest_entries = deque(csv.DictReader(log), entries_in_interval) for entry in lastest_entries: cls.procces_csv_row(entry, objects_logs) objects_logs_merged = {0: {'Occupancy': []}} for hour in objects_logs: objects_logs_merged[0]['Occupancy'].extend(objects_logs[hour]['Occupancy']) occupancy_live = cls.generate_hourly_metric_data(objects_logs_merged, entity)[0].tolist() occupancy_live.append(int(entity['occupancy_threshold'])) daily_violations = 0 entity_directory = entity['base_directory'] reports_directory = os.path.join(entity_directory, 'reports', cls.reports_folder) file_path = os.path.join(reports_directory, 'live.csv') if os.path.exists(file_path): with open(file_path, 'r') as live_file: lastest_entry = deque(csv.DictReader(live_file), 1)[0] if (datetime.strptime(lastest_entry['Time'], '%Y-%m-%d %H:%M:%S').date() == datetime.today().date()): daily_violations = int(lastest_entry['Violations']) if (occupancy_live[1] > occupancy_live[2]): daily_violations += 1 occupancy_live.append(daily_violations) return occupancy_live
@classmethod def generate_live_csv_data(cls, today_entity_csv, entity, entries_in_interval): '\n \n ' with open(today_entity_csv, 'r') as log: objects_logs = {} lastest_entries = deque(csv.DictReader(log), entries_in_interval) for entry in lastest_entries: cls.procces_csv_row(entry, objects_logs) objects_logs_merged = {0: {'Occupancy': []}} for hour in objects_logs: objects_logs_merged[0]['Occupancy'].extend(objects_logs[hour]['Occupancy']) occupancy_live = cls.generate_hourly_metric_data(objects_logs_merged, entity)[0].tolist() occupancy_live.append(int(entity['occupancy_threshold'])) daily_violations = 0 entity_directory = entity['base_directory'] reports_directory = os.path.join(entity_directory, 'reports', cls.reports_folder) file_path = os.path.join(reports_directory, 'live.csv') if os.path.exists(file_path): with open(file_path, 'r') as live_file: lastest_entry = deque(csv.DictReader(live_file), 1)[0] if (datetime.strptime(lastest_entry['Time'], '%Y-%m-%d %H:%M:%S').date() == datetime.today().date()): daily_violations = int(lastest_entry['Violations']) if (occupancy_live[1] > occupancy_live[2]): daily_violations += 1 occupancy_live.append(daily_violations) return occupancy_live<|docstring|>Generates the live report using the `today_entity_csv` file received.<|endoftext|>
f92220a3ee99494b4668602b6a86c772fb667f81e2f9af71c4b7699d66023590
def test_td3_same_seed(self): '\n Test whether multiple runs with the same seed leads to exact same model weights for their TD3\n ' td3 = TD3(state_dim=self.state_dim, action_dim=self.action_dim, max_action=self.max_action) file_name = f'TD3_{self.env_str}_{self.seed}' td3_params = Namespace(policy='TD3', env=self.env_str, seed=self.seed, start_timesteps=1000, eval_freq=5000.0, max_timesteps=2000, expl_noise=0.1, batch_size=256, discount=0.99, tau=0.005, policy_noise=0.2, noise_clip=0.5, policy_freq=2, save_model=True, load_model='', file_name=file_name, dest_model_path='./test/models', dest_res_path='./test/results') td3_main(vars(td3_params)) td3.load(f'./test/models/{file_name}') actor = deepcopy(td3.actor) critic = deepcopy(td3.critic) td3_main(vars(td3_params)) td3.load(f'./test/models/{file_name}') for (x, y) in zip(actor.parameters(), td3.actor.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(critic.parameters(), td3.critic.parameters()): self.assertTrue(torch.equal(x, y))
Test whether multiple runs with the same seed leads to exact same model weights for their TD3
test/test_reproducibility.py
test_td3_same_seed
Heyjuke58/sac_reproduction
0
python
def test_td3_same_seed(self): '\n \n ' td3 = TD3(state_dim=self.state_dim, action_dim=self.action_dim, max_action=self.max_action) file_name = f'TD3_{self.env_str}_{self.seed}' td3_params = Namespace(policy='TD3', env=self.env_str, seed=self.seed, start_timesteps=1000, eval_freq=5000.0, max_timesteps=2000, expl_noise=0.1, batch_size=256, discount=0.99, tau=0.005, policy_noise=0.2, noise_clip=0.5, policy_freq=2, save_model=True, load_model=, file_name=file_name, dest_model_path='./test/models', dest_res_path='./test/results') td3_main(vars(td3_params)) td3.load(f'./test/models/{file_name}') actor = deepcopy(td3.actor) critic = deepcopy(td3.critic) td3_main(vars(td3_params)) td3.load(f'./test/models/{file_name}') for (x, y) in zip(actor.parameters(), td3.actor.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(critic.parameters(), td3.critic.parameters()): self.assertTrue(torch.equal(x, y))
def test_td3_same_seed(self): '\n \n ' td3 = TD3(state_dim=self.state_dim, action_dim=self.action_dim, max_action=self.max_action) file_name = f'TD3_{self.env_str}_{self.seed}' td3_params = Namespace(policy='TD3', env=self.env_str, seed=self.seed, start_timesteps=1000, eval_freq=5000.0, max_timesteps=2000, expl_noise=0.1, batch_size=256, discount=0.99, tau=0.005, policy_noise=0.2, noise_clip=0.5, policy_freq=2, save_model=True, load_model=, file_name=file_name, dest_model_path='./test/models', dest_res_path='./test/results') td3_main(vars(td3_params)) td3.load(f'./test/models/{file_name}') actor = deepcopy(td3.actor) critic = deepcopy(td3.critic) td3_main(vars(td3_params)) td3.load(f'./test/models/{file_name}') for (x, y) in zip(actor.parameters(), td3.actor.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(critic.parameters(), td3.critic.parameters()): self.assertTrue(torch.equal(x, y))<|docstring|>Test whether multiple runs with the same seed leads to exact same model weights for their TD3<|endoftext|>
5b276c5d0d61826b262c150cd3597d662572ccdb9df31f867495ab709e1d1780
def test_sac_same_seed(self): '\n Test whether multiple runs with the same seed leads to exact same model weights for our SAC\n ' sac_hpars = SAC_HOPPER.copy() sac_hpars.update({'seed': 12, 'max_env_steps': 2000, 'file_name': 'sac', 'dest_model_path': './test/models', 'dest_res_path': './test/results'}) sac1 = SACTrainer(**sac_hpars) sac1.train() sac2 = SACTrainer(**sac_hpars) sac2.train() for (x, y) in zip(sac1.policy.parameters(), sac2.policy.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.qf1.parameters(), sac2.qf1.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.qf2.parameters(), sac2.qf2.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.value.parameters(), sac2.value.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.target_value.parameters(), sac2.target_value.parameters()): self.assertTrue(torch.equal(x, y))
Test whether multiple runs with the same seed leads to exact same model weights for our SAC
test/test_reproducibility.py
test_sac_same_seed
Heyjuke58/sac_reproduction
0
python
def test_sac_same_seed(self): '\n \n ' sac_hpars = SAC_HOPPER.copy() sac_hpars.update({'seed': 12, 'max_env_steps': 2000, 'file_name': 'sac', 'dest_model_path': './test/models', 'dest_res_path': './test/results'}) sac1 = SACTrainer(**sac_hpars) sac1.train() sac2 = SACTrainer(**sac_hpars) sac2.train() for (x, y) in zip(sac1.policy.parameters(), sac2.policy.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.qf1.parameters(), sac2.qf1.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.qf2.parameters(), sac2.qf2.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.value.parameters(), sac2.value.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.target_value.parameters(), sac2.target_value.parameters()): self.assertTrue(torch.equal(x, y))
def test_sac_same_seed(self): '\n \n ' sac_hpars = SAC_HOPPER.copy() sac_hpars.update({'seed': 12, 'max_env_steps': 2000, 'file_name': 'sac', 'dest_model_path': './test/models', 'dest_res_path': './test/results'}) sac1 = SACTrainer(**sac_hpars) sac1.train() sac2 = SACTrainer(**sac_hpars) sac2.train() for (x, y) in zip(sac1.policy.parameters(), sac2.policy.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.qf1.parameters(), sac2.qf1.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.qf2.parameters(), sac2.qf2.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.value.parameters(), sac2.value.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.target_value.parameters(), sac2.target_value.parameters()): self.assertTrue(torch.equal(x, y))<|docstring|>Test whether multiple runs with the same seed leads to exact same model weights for our SAC<|endoftext|>
9a8b89e79735ac7c13f35d24dd03cd577f139406d45806aa69609222e087bc74
def test_sac_v2_same_seed(self): '\n Test whether multiple runs with the same seed leads to exact same model weights for our SAC V2\n ' sac_v2_hpars = SAC_V2_HOPPER.copy() sac_v2_hpars.update({'seed': 12, 'max_env_steps': 2000, 'file_name': 'sac_v2', 'dest_model_path': './test/models', 'dest_res_path': './test/results'}) sac1 = SACTrainerV2(**sac_v2_hpars) sac1.train() sac2 = SACTrainerV2(**sac_v2_hpars) sac2.train() for (x, y) in zip(sac1.policy.parameters(), sac2.policy.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.qf1.parameters(), sac2.qf1.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.qf2.parameters(), sac2.qf2.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.value.parameters(), sac2.value.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.target_value.parameters(), sac2.target_value.parameters()): self.assertTrue(torch.equal(x, y))
Test whether multiple runs with the same seed leads to exact same model weights for our SAC V2
test/test_reproducibility.py
test_sac_v2_same_seed
Heyjuke58/sac_reproduction
0
python
def test_sac_v2_same_seed(self): '\n \n ' sac_v2_hpars = SAC_V2_HOPPER.copy() sac_v2_hpars.update({'seed': 12, 'max_env_steps': 2000, 'file_name': 'sac_v2', 'dest_model_path': './test/models', 'dest_res_path': './test/results'}) sac1 = SACTrainerV2(**sac_v2_hpars) sac1.train() sac2 = SACTrainerV2(**sac_v2_hpars) sac2.train() for (x, y) in zip(sac1.policy.parameters(), sac2.policy.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.qf1.parameters(), sac2.qf1.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.qf2.parameters(), sac2.qf2.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.value.parameters(), sac2.value.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.target_value.parameters(), sac2.target_value.parameters()): self.assertTrue(torch.equal(x, y))
def test_sac_v2_same_seed(self): '\n \n ' sac_v2_hpars = SAC_V2_HOPPER.copy() sac_v2_hpars.update({'seed': 12, 'max_env_steps': 2000, 'file_name': 'sac_v2', 'dest_model_path': './test/models', 'dest_res_path': './test/results'}) sac1 = SACTrainerV2(**sac_v2_hpars) sac1.train() sac2 = SACTrainerV2(**sac_v2_hpars) sac2.train() for (x, y) in zip(sac1.policy.parameters(), sac2.policy.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.qf1.parameters(), sac2.qf1.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.qf2.parameters(), sac2.qf2.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.value.parameters(), sac2.value.parameters()): self.assertTrue(torch.equal(x, y)) for (x, y) in zip(sac1.target_value.parameters(), sac2.target_value.parameters()): self.assertTrue(torch.equal(x, y))<|docstring|>Test whether multiple runs with the same seed leads to exact same model weights for our SAC V2<|endoftext|>
5fed1e514fc9f5482a479d8b5ebda48c3e3c3fa94f8f3655b856b075b3bd2c3c
def __init__(self, *, df: Optional[pd.DataFrame]=None, csv_path: Optional[Union[(str, Path)]]=None, remove_id_substring: Optional[bool]=False): "Init function.\n\n Only `df` or `csv_path` can be specified.\n\n Keyword Arguments:\n df {Optional[pd.DataFrame]}\n -- A raw dataframe. (default: {None})\n csv_path {Optional[Union[str, Path]]}\n -- Path to a dataframe (default: {None})\n remove_id_substring {Optional[bool]}\n -- If True, replaces 'id'-like substrings in column names\n of dataframes (default: False)\n pattern {Pattern}\n -- Compiled regex pattern\n\n Raises:\n ValueError: When both `df` and `csv_path` are specified\n TypeError: When `df` is not a pd.DataFrame\n TypeError: When `csv_path` is not a str or a Path\n ValueError: When `csv_path` is not a csv file\n " if ((df is not None) == (csv_path is not None)): raise ValueError('Only one of `df` or `csv_path` can be provided.') if ((df is not None) and (not isinstance(df, pd.DataFrame))): raise TypeError(f'Expecting `df` of type pd.DataFrame. Got type {type(df)}.') if (csv_path is not None): if (not (isinstance(csv_path, str) or isinstance(csv_path, Path))): raise TypeError(f'Expecting `csv_path` of type str or Path. Got type {type(csv_path)}.') if (str(csv_path)[(- 3):].lower() != 'csv'): raise ValueError('A CSV file is expected for `csv_path`.') self.df = df self.csv_path = csv_path self.remove_id_substring = remove_id_substring self.pattern = re.compile('_?[iI][dD]')
Init function. Only `df` or `csv_path` can be specified. Keyword Arguments: df {Optional[pd.DataFrame]} -- A raw dataframe. (default: {None}) csv_path {Optional[Union[str, Path]]} -- Path to a dataframe (default: {None}) remove_id_substring {Optional[bool]} -- If True, replaces 'id'-like substrings in column names of dataframes (default: False) pattern {Pattern} -- Compiled regex pattern Raises: ValueError: When both `df` and `csv_path` are specified TypeError: When `df` is not a pd.DataFrame TypeError: When `csv_path` is not a str or a Path ValueError: When `csv_path` is not a csv file
foreshadow/smart/intent_resolving/core/data_set_parsers/lazy_dataframe_loader.py
__init__
adithyabsk/foreshadow
25
python
def __init__(self, *, df: Optional[pd.DataFrame]=None, csv_path: Optional[Union[(str, Path)]]=None, remove_id_substring: Optional[bool]=False): "Init function.\n\n Only `df` or `csv_path` can be specified.\n\n Keyword Arguments:\n df {Optional[pd.DataFrame]}\n -- A raw dataframe. (default: {None})\n csv_path {Optional[Union[str, Path]]}\n -- Path to a dataframe (default: {None})\n remove_id_substring {Optional[bool]}\n -- If True, replaces 'id'-like substrings in column names\n of dataframes (default: False)\n pattern {Pattern}\n -- Compiled regex pattern\n\n Raises:\n ValueError: When both `df` and `csv_path` are specified\n TypeError: When `df` is not a pd.DataFrame\n TypeError: When `csv_path` is not a str or a Path\n ValueError: When `csv_path` is not a csv file\n " if ((df is not None) == (csv_path is not None)): raise ValueError('Only one of `df` or `csv_path` can be provided.') if ((df is not None) and (not isinstance(df, pd.DataFrame))): raise TypeError(f'Expecting `df` of type pd.DataFrame. Got type {type(df)}.') if (csv_path is not None): if (not (isinstance(csv_path, str) or isinstance(csv_path, Path))): raise TypeError(f'Expecting `csv_path` of type str or Path. Got type {type(csv_path)}.') if (str(csv_path)[(- 3):].lower() != 'csv'): raise ValueError('A CSV file is expected for `csv_path`.') self.df = df self.csv_path = csv_path self.remove_id_substring = remove_id_substring self.pattern = re.compile('_?[iI][dD]')
def __init__(self, *, df: Optional[pd.DataFrame]=None, csv_path: Optional[Union[(str, Path)]]=None, remove_id_substring: Optional[bool]=False): "Init function.\n\n Only `df` or `csv_path` can be specified.\n\n Keyword Arguments:\n df {Optional[pd.DataFrame]}\n -- A raw dataframe. (default: {None})\n csv_path {Optional[Union[str, Path]]}\n -- Path to a dataframe (default: {None})\n remove_id_substring {Optional[bool]}\n -- If True, replaces 'id'-like substrings in column names\n of dataframes (default: False)\n pattern {Pattern}\n -- Compiled regex pattern\n\n Raises:\n ValueError: When both `df` and `csv_path` are specified\n TypeError: When `df` is not a pd.DataFrame\n TypeError: When `csv_path` is not a str or a Path\n ValueError: When `csv_path` is not a csv file\n " if ((df is not None) == (csv_path is not None)): raise ValueError('Only one of `df` or `csv_path` can be provided.') if ((df is not None) and (not isinstance(df, pd.DataFrame))): raise TypeError(f'Expecting `df` of type pd.DataFrame. Got type {type(df)}.') if (csv_path is not None): if (not (isinstance(csv_path, str) or isinstance(csv_path, Path))): raise TypeError(f'Expecting `csv_path` of type str or Path. Got type {type(csv_path)}.') if (str(csv_path)[(- 3):].lower() != 'csv'): raise ValueError('A CSV file is expected for `csv_path`.') self.df = df self.csv_path = csv_path self.remove_id_substring = remove_id_substring self.pattern = re.compile('_?[iI][dD]')<|docstring|>Init function. Only `df` or `csv_path` can be specified. Keyword Arguments: df {Optional[pd.DataFrame]} -- A raw dataframe. 
(default: {None}) csv_path {Optional[Union[str, Path]]} -- Path to a dataframe (default: {None}) remove_id_substring {Optional[bool]} -- If True, replaces 'id'-like substrings in column names of dataframes (default: False) pattern {Pattern} -- Compiled regex pattern Raises: ValueError: When both `df` and `csv_path` are specified TypeError: When `df` is not a pd.DataFrame TypeError: When `csv_path` is not a str or a Path ValueError: When `csv_path` is not a csv file<|endoftext|>