code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
# Author: Dr. Konstantin Selyunin
# License: MIT
# Created: 2021.08.30
import logging
import os.path
import struct
from abc import abstractmethod, ABC
from typing import Union, Tuple
from rsl_xml_svd.rsl_svd_parser import RslSvdParser
class UM8Registers(ABC):
def __init__(self, **kwargs):
    """Load the UM-8 register description from the bundled ``um8.svd`` file."""
    svd_path = UM8Registers.find_svd('um8.svd')
    self.svd_parser = RslSvdParser(svd_file=svd_path)
@staticmethod
def find_svd(svd_file_name: str):
parent_dir = os.path.join(os.path.dirname(__file__), os.pardir)
for root, dirs, files in os.walk(parent_dir):
if svd_file_name in files:
return os.path.join(root, svd_file_name)
@abstractmethod
def connect(self, *args, **kwargs):
    """Open the communication channel to the device; implemented by transport-specific subclasses."""
    pass
@abstractmethod
def read_register(self, reg_addr: int, **kw) -> Tuple[bool, bytes]:
    """Read one 32-bit register from the device.

    :param reg_addr: register address to read
    :return: ``(ok, payload)`` — success flag and raw payload bytes; every
        register property in this class consumes it as
        ``ok, payload = self.read_register(addr)`` (the previous ``-> bytes``
        annotation did not match that contract)
    """
    pass
@abstractmethod
def write_register(self, reg_addr: int, reg_value: Union[int, bytes, float], **kw):
    """Write one register on the device.

    :param reg_addr: register address to write
    :param reg_value: new register content (raw int, packed bytes, or float)
    """
    pass
@property
def creg_com_settings(self):
"""
The CREG_COM_SETTINGS register is used to set the boards serial port baud rate and to enable (disable) the
automatic transmission of sensor data and estimated states (telemetry).
Payload structure:
[31:28] : BAUD_RATE -- Sets the baud rate of the boards main serial port:
[27:24] : GPS_BAUD -- Sets the baud rate of the UM8 auxiliary serial port:
[8] : GPS -- If set, this bit causes GPS data to be transmitted automatically whenever new GPS data is received. GPS data is stored in registers 125 to 130. These registers will be transmitted in a batch packet of length 6 starting at address 125.
[4] : SAT -- If set, this bit causes satellite details to be transmitted whenever they are provided by the GPS. Satellite information is stored in registers 131 to 136. These registers will be transmitted in a batch packet of length 6 beginning at address 131.
:return: BAUD_RATE as bitField; GPS_BAUD as bitField; GPS as bitField; SAT as bitField;
"""
addr = 0x00
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_SETTINGS')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for BAUD_RATE bit field
baud_rate_val = (reg.raw_value >> 28) & 0x000F
baud_rate_enum = reg.find_field_by(name='BAUD_RATE').find_enum_entry_by(value=baud_rate_val)
# find value for GPS_BAUD bit field
gps_baud_val = (reg.raw_value >> 24) & 0x000F
gps_baud_enum = reg.find_field_by(name='GPS_BAUD').find_enum_entry_by(value=gps_baud_val)
# find value for GPS bit field
gps_val = (reg.raw_value >> 8) & 0x0001
gps_enum = reg.find_field_by(name='GPS').find_enum_entry_by(value=gps_val)
# find value for SAT bit field
sat_val = (reg.raw_value >> 4) & 0x0001
sat_enum = reg.find_field_by(name='SAT').find_enum_entry_by(value=sat_val)
return reg, baud_rate_enum, gps_baud_enum, gps_enum, sat_enum
@creg_com_settings.setter
def creg_com_settings(self, new_value):
addr = 0x00
self.write_register(addr, new_value)
@property
def creg_com_rates1(self):
"""
The CREG_COM_RATES1 register sets desired telemetry transmission rates in Hz for raw accelerometer, gyro, and
magnetometer data. If the specified rate is 0, then no data is transmitted.
Payload structure:
[31:24] : RAW_ACCEL_RATE -- Specifies the desired raw accelerometer data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[23:16] : RAW_GYRO_RATE -- Specifies the desired raw gyro data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[15:8] : RAW_MAG_RATE -- Specifies the desired raw magnetometer data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: RAW_ACCEL_RATE as uint8_t; RAW_GYRO_RATE as uint8_t; RAW_MAG_RATE as uint8_t;
"""
addr = 0x01
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES1')
reg.raw_value, = struct.unpack('>I', payload[0:4])
raw_accel_rate, raw_gyro_rate, raw_mag_rate = struct.unpack('>BBBx', payload[0:4])
return reg, raw_accel_rate, raw_gyro_rate, raw_mag_rate
@creg_com_rates1.setter
def creg_com_rates1(self, new_value):
addr = 0x01
self.write_register(addr, new_value)
@property
def creg_com_rates2(self):
"""
The CREG_COM_RATES2 register sets desired telemetry transmission rates for all raw data and temperature. If
the specified rate is 0, then no data is transmitted.
Payload structure:
[31:24] : TEMP_RATE -- Specifies the desired broadcast rate for temperature data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : ALL_RAW_RATE -- Specifies the desired broadcast rate for all raw sensor data. If set, this overrides the broadcast rate setting for individual raw data broadcast rates. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: TEMP_RATE as uint8_t; ALL_RAW_RATE as uint8_t;
"""
addr = 0x02
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES2')
reg.raw_value, = struct.unpack('>I', payload[0:4])
temp_rate, all_raw_rate = struct.unpack('>BxxB', payload[0:4])
return reg, temp_rate, all_raw_rate
@creg_com_rates2.setter
def creg_com_rates2(self, new_value):
addr = 0x02
self.write_register(addr, new_value)
@property
def creg_com_rates3(self):
"""
The CREG_COM_RATES3 register sets desired telemetry transmission rates for processed sensor data. If the
specified rate is 0, then no data is transmitted.
Payload structure:
[31:24] : PROC_ACCEL_RATE -- Specifies the desired broadcast rate for processed accelerometer data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[23:16] : PROC_GYRO_RATE -- Specifies the desired broadcast rate for processed rate gyro data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[15:8] : PROC_MAG_RATE -- Specifies the desired broadcast rate for processed magnetometer data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: PROC_ACCEL_RATE as uint8_t; PROC_GYRO_RATE as uint8_t; PROC_MAG_RATE as uint8_t;
"""
addr = 0x03
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES3')
reg.raw_value, = struct.unpack('>I', payload[0:4])
proc_accel_rate, proc_gyro_rate, proc_mag_rate = struct.unpack('>BBBx', payload[0:4])
return reg, proc_accel_rate, proc_gyro_rate, proc_mag_rate
@creg_com_rates3.setter
def creg_com_rates3(self, new_value):
addr = 0x03
self.write_register(addr, new_value)
@property
def creg_com_rates4(self):
"""
The CREG_COM_RATES4 register defines the desired telemetry transmission rates for all processed data. If the
specified rate is 0, then no data is transmitted.
Payload structure:
[7:0] : ALL_PROC_RATE -- Specifies the desired broadcast rate for raw all processed data. If set, this overrides the broadcast rate setting for individual processed data broadcast rates. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: ALL_PROC_RATE as uint8_t;
"""
addr = 0x04
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES4')
reg.raw_value, = struct.unpack('>xxxB', payload[0:4])
all_proc_rate, = struct.unpack('>xxxB', payload[0:4])
return reg, all_proc_rate,
@creg_com_rates4.setter
def creg_com_rates4(self, new_value):
addr = 0x04
self.write_register(addr, new_value)
@property
def creg_com_rates5(self):
"""
The CREG_COM_RATES5 register sets desired telemetry transmission rates for quaternions, Euler Angles,
position, and velocity estimates. If the specified rate is 0, then no data is transmitted.
Payload structure:
[31:24] : QUAT_RATE -- Specifies the desired broadcast rate for quaternion data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[23:16] : EULER_RATE -- Specifies the desired broadcast rate for Euler Angle data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[15:8] : POSITION_RATE -- Specifies the desired broadcast rate position. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : VELOCITY_RATE -- Specifies the desired broadcast rate for velocity. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: QUAT_RATE as uint8_t; EULER_RATE as uint8_t; POSITION_RATE as uint8_t; VELOCITY_RATE as uint8_t;
"""
addr = 0x05
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES5')
reg.raw_value, = struct.unpack('>I', payload[0:4])
quat_rate, euler_rate, position_rate, velocity_rate = struct.unpack('>BBBB', payload[0:4])
return reg, quat_rate, euler_rate, position_rate, velocity_rate
@creg_com_rates5.setter
def creg_com_rates5(self, new_value):
addr = 0x05
self.write_register(addr, new_value)
@property
def creg_com_rates6(self):
"""
The CREG_COM_RATES6 register sets desired telemetry transmission rates for pose (Euler/position packet),
health, and gyro bias estimates for the gyro 1 and gyro 2. If the specified rate is 0, then no data is
transmitted.
Payload structure:
[31:24] : POSE_RATE -- Specifies the desired broadcast rate for pose (Euler Angle and position) data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[19:16] : HEALTH_RATE -- Specifies the desired broadcast rate for the sensor health packet.
[15:8] : GYRO_BIAS_RATE -- Specifies the desired broadcast rate for gyro bias estimates. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: POSE_RATE as uint8_t; HEALTH_RATE as bitField; GYRO_BIAS_RATE as uint8_t;
"""
addr = 0x06
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES6')
reg.raw_value, = struct.unpack('>I', payload[0:4])
pose_rate, gyro_bias_rate = struct.unpack('>BxBx', payload[0:4])
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for HEALTH_RATE bit field
health_rate_val = (reg.raw_value >> 16) & 0x000F
health_rate_enum = reg.find_field_by(name='HEALTH_RATE').find_enum_entry_by(value=health_rate_val)
return reg, pose_rate, gyro_bias_rate, reg, health_rate_enum
@creg_com_rates6.setter
def creg_com_rates6(self, new_value):
addr = 0x06
self.write_register(addr, new_value)
@property
def creg_com_rates7(self):
"""
The CREG_COM_RATES7 register sets desired telemetry transmission rates in Hz for NMEA packets.
Payload structure:
[31:28] : NMEA_HEALTH_RATE -- Specifies the desired broadcast rate for Redshift Labs Pty Ltd NMEA-style health packet.
[27:24] : NMEA_POSE_RATE -- Specifies the desired broadcast rate for Redshift Labs Pty Ltd NMEA-style pose (Euler Angle/position) packet.
[23:20] : NMEA_ATTITUDE_RATE -- Specifies the desired broadcast rate for Redshift Labs Pty Ltd NMEA-style attitude packet.
[19:16] : NMEA_SENSOR_RATE -- Specifies the desired broadcast rate for Redshift Labs Pty Ltd NMEA-style sensor data packet.
[15:12] : NMEA_RATES_RATE -- Specifies the desired broadcast rate for Redshift Labs Pty Ltd NMEA-style rate data packet.
[11:8] : NMEA_GPS_POSE_RATE -- Specifies the desired broadcast rate for Redshift Labs Pty Ltd NMEA-style GPS pose packet.
[7:4] : NMEA_QUAT_RATE -- Specifies the desired broadcast rate for Redshift Labs Pty Ltd NMEA-style quaternion packet.
:return: NMEA_HEALTH_RATE as bitField; NMEA_POSE_RATE as bitField; NMEA_ATTITUDE_RATE as bitField; NMEA_SENSOR_RATE as bitField; NMEA_RATES_RATE as bitField; NMEA_GPS_POSE_RATE as bitField; NMEA_QUAT_RATE as bitField;
"""
addr = 0x07
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES7')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for NMEA_HEALTH_RATE bit field
nmea_health_rate_val = (reg.raw_value >> 28) & 0x000F
nmea_health_rate_enum = reg.find_field_by(name='NMEA_HEALTH_RATE').find_enum_entry_by(value=nmea_health_rate_val)
# find value for NMEA_POSE_RATE bit field
nmea_pose_rate_val = (reg.raw_value >> 24) & 0x000F
nmea_pose_rate_enum = reg.find_field_by(name='NMEA_POSE_RATE').find_enum_entry_by(value=nmea_pose_rate_val)
# find value for NMEA_ATTITUDE_RATE bit field
nmea_attitude_rate_val = (reg.raw_value >> 20) & 0x000F
nmea_attitude_rate_enum = reg.find_field_by(name='NMEA_ATTITUDE_RATE').find_enum_entry_by(value=nmea_attitude_rate_val)
# find value for NMEA_SENSOR_RATE bit field
nmea_sensor_rate_val = (reg.raw_value >> 16) & 0x000F
nmea_sensor_rate_enum = reg.find_field_by(name='NMEA_SENSOR_RATE').find_enum_entry_by(value=nmea_sensor_rate_val)
# find value for NMEA_RATES_RATE bit field
nmea_rates_rate_val = (reg.raw_value >> 12) & 0x000F
nmea_rates_rate_enum = reg.find_field_by(name='NMEA_RATES_RATE').find_enum_entry_by(value=nmea_rates_rate_val)
# find value for NMEA_GPS_POSE_RATE bit field
nmea_gps_pose_rate_val = (reg.raw_value >> 8) & 0x000F
nmea_gps_pose_rate_enum = reg.find_field_by(name='NMEA_GPS_POSE_RATE').find_enum_entry_by(value=nmea_gps_pose_rate_val)
# find value for NMEA_QUAT_RATE bit field
nmea_quat_rate_val = (reg.raw_value >> 4) & 0x000F
nmea_quat_rate_enum = reg.find_field_by(name='NMEA_QUAT_RATE').find_enum_entry_by(value=nmea_quat_rate_val)
return reg, nmea_health_rate_enum, nmea_pose_rate_enum, nmea_attitude_rate_enum, nmea_sensor_rate_enum, nmea_rates_rate_enum, nmea_gps_pose_rate_enum, nmea_quat_rate_enum
@creg_com_rates7.setter
def creg_com_rates7(self, new_value):
addr = 0x07
self.write_register(addr, new_value)
@property
def creg_misc_settings(self):
"""
This register contains miscellaneous filter and sensor control options.
Payload structure:
[8] : PPS -- If set, this bit causes the TX2 pin on the IO Expansion header to be used as the PPS input from an external GPS module. PPS pulses will then be used to synchronize the system clock to UTC time of day.
[2] : ZG -- If set, this bit causes the device to attempt to measure the rate gyro bias on startup. The sensor must be stationary on startup for this feature to work properly.
[1] : Q -- If this bit is set, the sensor will run in quaternion mode instead of Euler Angle mode.
[0] : MAG -- If set, the magnetometer will be used in state updates.
:return: PPS as bitField; ZG as bitField; Q as bitField; MAG as bitField;
"""
addr = 0x08
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MISC_SETTINGS')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for PPS bit field
pps_val = (reg.raw_value >> 8) & 0x0001
pps_enum = reg.find_field_by(name='PPS').find_enum_entry_by(value=pps_val)
# find value for ZG bit field
zg_val = (reg.raw_value >> 2) & 0x0001
zg_enum = reg.find_field_by(name='ZG').find_enum_entry_by(value=zg_val)
# find value for Q bit field
q_val = (reg.raw_value >> 1) & 0x0001
q_enum = reg.find_field_by(name='Q').find_enum_entry_by(value=q_val)
# find value for MAG bit field
mag_val = (reg.raw_value >> 0) & 0x0001
mag_enum = reg.find_field_by(name='MAG').find_enum_entry_by(value=mag_val)
return reg, pps_enum, zg_enum, q_enum, mag_enum
@creg_misc_settings.setter
def creg_misc_settings(self, new_value):
addr = 0x08
self.write_register(addr, new_value)
@property
def creg_home_north(self):
"""
This register sets the north home latitude in degrees, used to convert GPS coordinates to position in meters
from home.
Payload structure:
[31:0] : SET_HOME_NORTH -- North Position (32-bit IEEE Floating Point Value)
:return: SET_HOME_NORTH as float;
"""
addr = 0x09
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_HOME_NORTH')
reg.raw_value, = struct.unpack('>f', payload[0:4])
set_home_north, = struct.unpack('>f', payload[0:4])
return reg, set_home_north,
@creg_home_north.setter
def creg_home_north(self, new_value):
addr = 0x09
self.write_register(addr, new_value)
@property
def creg_home_east(self):
"""
This register sets the east home longitude in degrees, used to convert GPS coordinates to position in meters
from home.
Payload structure:
[31:0] : SET_HOME_EAST -- East Position (32-bit IEEE Floating Point Value)
:return: SET_HOME_EAST as float;
"""
addr = 0x0A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_HOME_EAST')
reg.raw_value, = struct.unpack('>f', payload[0:4])
set_home_east, = struct.unpack('>f', payload[0:4])
return reg, set_home_east,
@creg_home_east.setter
def creg_home_east(self, new_value):
addr = 0x0A
self.write_register(addr, new_value)
@property
def creg_home_up(self):
"""
This register sets the home altitude in meters. Used to convert GPS coordinates to position in meters from
home.
Payload structure:
[31:0] : SET_HOME_UP -- Altitude Position (32-bit IEEE Floating Point Value)
:return: SET_HOME_UP as float;
"""
addr = 0x0B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_HOME_UP')
reg.raw_value, = struct.unpack('>f', payload[0:4])
set_home_up, = struct.unpack('>f', payload[0:4])
return reg, set_home_up,
@creg_home_up.setter
def creg_home_up(self, new_value):
addr = 0x0B
self.write_register(addr, new_value)
@property
def creg_gyro_trim_x(self):
"""
This register sets the x-axis rate gyro trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_TRIM_X -- 32-bit IEEE Floating Point Value
:return: GYRO_TRIM_X as float;
"""
addr = 0x0C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_TRIM_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_trim_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_trim_x,
@creg_gyro_trim_x.setter
def creg_gyro_trim_x(self, new_value):
addr = 0x0C
self.write_register(addr, new_value)
@property
def creg_gyro_trim_y(self):
"""
This register sets the y-axis rate gyro trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_TRIM_Y -- 32-bit IEEE Floating Point Value
:return: GYRO_TRIM_Y as float;
"""
addr = 0x0D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_TRIM_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_trim_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_trim_y,
@creg_gyro_trim_y.setter
def creg_gyro_trim_y(self, new_value):
addr = 0x0D
self.write_register(addr, new_value)
@property
def creg_gyro_trim_z(self):
"""
This register sets the z-axis rate gyro trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_TRIM_Z -- 32-bit IEEE Floating Point Value
:return: GYRO_TRIM_Z as float;
"""
addr = 0x0E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_TRIM_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_trim_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_trim_z,
@creg_gyro_trim_z.setter
def creg_gyro_trim_z(self, new_value):
addr = 0x0E
self.write_register(addr, new_value)
@property
def creg_mag_cal1_1(self):
"""
Row 1, Column 1 of magnetometer calibration matrix.
Payload structure:
[31:0] : MAG_CAL1_1 -- 32-bit IEEE Floating Point Value
:return: MAG_CAL1_1 as float;
"""
addr = 0x0F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_CAL1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_cal1_1, = struct.unpack('>f', payload[0:4])
return reg, mag_cal1_1,
@creg_mag_cal1_1.setter
def creg_mag_cal1_1(self, new_value):
addr = 0x0F
self.write_register(addr, new_value)
@property
def creg_mag_cal1_2(self):
"""
Row 1, Column 2 of magnetometer calibration matrix.
Payload structure:
[31:0] : MAG_CAL1_2 -- 32-bit IEEE Floating Point Value
:return: MAG_CAL1_2 as float;
"""
addr = 0x10
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_CAL1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_cal1_2, = struct.unpack('>f', payload[0:4])
return reg, mag_cal1_2,
@creg_mag_cal1_2.setter
def creg_mag_cal1_2(self, new_value):
addr = 0x10
self.write_register(addr, new_value)
@property
def creg_mag_cal1_3(self):
"""
Row 1, Column 3 of magnetometer calibration matrix.
Payload structure:
[31:0] : MAG_CAL1_3 -- 32-bit IEEE Floating Point Value
:return: MAG_CAL1_3 as float;
"""
addr = 0x11
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_CAL1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_cal1_3, = struct.unpack('>f', payload[0:4])
return reg, mag_cal1_3,
@creg_mag_cal1_3.setter
def creg_mag_cal1_3(self, new_value):
addr = 0x11
self.write_register(addr, new_value)
@property
def creg_mag_cal2_1(self):
"""
Row 2, Column 1 of magnetometer calibration matrix.
Payload structure:
[31:0] : MAG_CAL2_1 -- 32-bit IEEE Floating Point Value
:return: MAG_CAL2_1 as float;
"""
addr = 0x12
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_CAL2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_cal2_1, = struct.unpack('>f', payload[0:4])
return reg, mag_cal2_1,
@creg_mag_cal2_1.setter
def creg_mag_cal2_1(self, new_value):
addr = 0x12
self.write_register(addr, new_value)
@property
def creg_mag_cal2_2(self):
"""
Row 2, Column 2 of magnetometer calibration matrix.
Payload structure:
[31:0] : MAG_CAL2_2 -- 32-bit IEEE Floating Point Value
:return: MAG_CAL2_2 as float;
"""
addr = 0x13
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_CAL2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_cal2_2, = struct.unpack('>f', payload[0:4])
return reg, mag_cal2_2,
@creg_mag_cal2_2.setter
def creg_mag_cal2_2(self, new_value):
addr = 0x13
self.write_register(addr, new_value)
@property
def creg_mag_cal2_3(self):
"""
Row 2, Column 3 of magnetometer calibration matrix.
Payload structure:
[31:0] : MAG_CAL2_3 -- 32-bit IEEE Floating Point Value
:return: MAG_CAL2_3 as float;
"""
addr = 0x14
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_CAL2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_cal2_3, = struct.unpack('>f', payload[0:4])
return reg, mag_cal2_3,
@creg_mag_cal2_3.setter
def creg_mag_cal2_3(self, new_value):
addr = 0x14
self.write_register(addr, new_value)
@property
def creg_mag_cal3_1(self):
"""
Row 3, Column 1 of magnetometer calibration matrix.
Payload structure:
[31:0] : MAG_CAL3_1 -- 32-bit IEEE Floating Point Value
:return: MAG_CAL3_1 as float;
"""
addr = 0x15
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_CAL3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_cal3_1, = struct.unpack('>f', payload[0:4])
return reg, mag_cal3_1,
@creg_mag_cal3_1.setter
def creg_mag_cal3_1(self, new_value):
addr = 0x15
self.write_register(addr, new_value)
@property
def creg_mag_cal3_2(self):
"""
Row 3, Column 2 of magnetometer calibration matrix.
Payload structure:
[31:0] : MAG_CAL3_2 -- 32-bit IEEE Floating Point Value
:return: MAG_CAL3_2 as float;
"""
addr = 0x16
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_CAL3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_cal3_2, = struct.unpack('>f', payload[0:4])
return reg, mag_cal3_2,
@creg_mag_cal3_2.setter
def creg_mag_cal3_2(self, new_value):
addr = 0x16
self.write_register(addr, new_value)
@property
def creg_mag_cal3_3(self):
"""
Row 3, Column 3 of magnetometer calibration matrix.
Payload structure:
[31:0] : MAG_CAL3_3 -- 32-bit IEEE Floating Point Value
:return: MAG_CAL3_3 as float;
"""
addr = 0x17
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_CAL3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_cal3_3, = struct.unpack('>f', payload[0:4])
return reg, mag_cal3_3,
@creg_mag_cal3_3.setter
def creg_mag_cal3_3(self, new_value):
addr = 0x17
self.write_register(addr, new_value)
@property
def creg_mag_bias_x(self):
"""
This register stores a bias term for the magnetometer x-axis for hard-iron calibration. This term can be
computed by performing magnetometer calibration with the Redshift labs Serial Interface.
Payload structure:
[31:0] : MAG_BIAS_X -- 32-bit IEEE Floating Point Value
:return: MAG_BIAS_X as float;
"""
addr = 0x18
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_bias_x, = struct.unpack('>f', payload[0:4])
return reg, mag_bias_x,
@creg_mag_bias_x.setter
def creg_mag_bias_x(self, new_value):
addr = 0x18
self.write_register(addr, new_value)
@property
def creg_mag_bias_y(self):
"""
This register stores a bias term for the magnetometer y-axis for hard-iron calibration. This term can be
computed by performing magnetometer calibration with the Redshift labs Serial Interface.
Payload structure:
[31:0] : MAG_BIAS_Y -- 32-bit IEEE Floating Point Value
:return: MAG_BIAS_Y as float;
"""
addr = 0x19
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_bias_y, = struct.unpack('>f', payload[0:4])
return reg, mag_bias_y,
@creg_mag_bias_y.setter
def creg_mag_bias_y(self, new_value):
addr = 0x19
self.write_register(addr, new_value)
@property
def creg_mag_bias_z(self):
"""
This register stores a bias term for the magnetometer z-axis for hard-iron calibration. This term can be
computed by performing magnetometer calibration with the Redshift labs Serial Interface.
Payload structure:
[31:0] : MAG_BIAS_Z -- 32-bit IEEE Floating Point Value
:return: MAG_BIAS_Z as float;
"""
addr = 0x1A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_bias_z, = struct.unpack('>f', payload[0:4])
return reg, mag_bias_z,
@creg_mag_bias_z.setter
def creg_mag_bias_z(self, new_value):
addr = 0x1A
self.write_register(addr, new_value)
@property
def creg_accel_cal1_1(self):
"""
Row 1, Column 1 of accelerometer calibration matrix.
Payload structure:
[31:0] : ACCEL_CAL1_1 -- 32-bit IEEE Floating Point Value
:return: ACCEL_CAL1_1 as float;
"""
addr = 0x1B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_CAL1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_cal1_1, = struct.unpack('>f', payload[0:4])
return reg, accel_cal1_1,
@creg_accel_cal1_1.setter
def creg_accel_cal1_1(self, new_value):
addr = 0x1B
self.write_register(addr, new_value)
@property
def creg_accel_cal1_2(self):
"""
Row 1, Column 2 of accelerometer calibration matrix.
Payload structure:
[31:0] : ACCEL_CAL1_2 -- 32-bit IEEE Floating Point Value
:return: ACCEL_CAL1_2 as float;
"""
addr = 0x1C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_CAL1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_cal1_2, = struct.unpack('>f', payload[0:4])
return reg, accel_cal1_2,
@creg_accel_cal1_2.setter
def creg_accel_cal1_2(self, new_value):
addr = 0x1C
self.write_register(addr, new_value)
@property
def creg_accel_cal1_3(self):
"""
Row 1, Column 3 of accelerometer calibration matrix.
Payload structure:
[31:0] : ACCEL_CAL1_3 -- 32-bit IEEE Floating Point Value
:return: ACCEL_CAL1_3 as float;
"""
addr = 0x1D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_CAL1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_cal1_3, = struct.unpack('>f', payload[0:4])
return reg, accel_cal1_3,
@creg_accel_cal1_3.setter
def creg_accel_cal1_3(self, new_value):
addr = 0x1D
self.write_register(addr, new_value)
@property
def creg_accel_cal2_1(self):
"""
Row 2, Column 1 of accelerometer calibration matrix.
Payload structure:
[31:0] : ACCEL_CAL2_1 -- 32-bit IEEE Floating Point Value
:return: ACCEL_CAL2_1 as float;
"""
addr = 0x1E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_CAL2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_cal2_1, = struct.unpack('>f', payload[0:4])
return reg, accel_cal2_1,
@creg_accel_cal2_1.setter
def creg_accel_cal2_1(self, new_value):
addr = 0x1E
self.write_register(addr, new_value)
@property
def creg_accel_cal2_2(self):
"""
Row 2, Column 2 of accelerometer calibration matrix.
Payload structure:
[31:0] : ACCEL_CAL2_2 -- 32-bit IEEE Floating Point Value
:return: ACCEL_CAL2_2 as float;
"""
addr = 0x1F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_CAL2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_cal2_2, = struct.unpack('>f', payload[0:4])
return reg, accel_cal2_2,
@creg_accel_cal2_2.setter
def creg_accel_cal2_2(self, new_value):
addr = 0x1F
self.write_register(addr, new_value)
@property
def creg_accel_cal2_3(self):
"""
Row 2, Column 3 of accelerometer calibration matrix.
Payload structure:
[31:0] : ACCEL_CAL2_3 -- 32-bit IEEE Floating Point Value
:return: ACCEL_CAL2_3 as float;
"""
addr = 0x20
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_CAL2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_cal2_3, = struct.unpack('>f', payload[0:4])
return reg, accel_cal2_3,
@creg_accel_cal2_3.setter
def creg_accel_cal2_3(self, new_value):
addr = 0x20
self.write_register(addr, new_value)
@property
def creg_accel_cal3_1(self):
"""
Row 3, Column 1 of accelerometer calibration matrix.
Payload structure:
[31:0] : ACCEL_CAL3_1 -- 32-bit IEEE Floating Point Value
:return: ACCEL_CAL3_1 as float;
"""
addr = 0x21
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_CAL3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_cal3_1, = struct.unpack('>f', payload[0:4])
return reg, accel_cal3_1,
@creg_accel_cal3_1.setter
def creg_accel_cal3_1(self, new_value):
addr = 0x21
self.write_register(addr, new_value)
@property
def creg_accel_cal3_2(self):
"""
Row 3, Column 2 of accelerometer calibration matrix.
Payload structure:
[31:0] : ACCEL_CAL3_2 -- 32-bit IEEE Floating Point Value
:return: ACCEL_CAL3_2 as float;
"""
addr = 0x22
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_CAL3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_cal3_2, = struct.unpack('>f', payload[0:4])
return reg, accel_cal3_2,
@creg_accel_cal3_2.setter
def creg_accel_cal3_2(self, new_value):
addr = 0x22
self.write_register(addr, new_value)
@property
def creg_accel_cal3_3(self):
"""
Row 3, Column 3 of accelerometer calibration matrix.
Payload structure:
[31:0] : ACCEL_CAL3_3 -- 32-bit IEEE Floating Point Value
:return: ACCEL_CAL3_3 as float;
"""
addr = 0x23
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_CAL3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_cal3_3, = struct.unpack('>f', payload[0:4])
return reg, accel_cal3_3,
@creg_accel_cal3_3.setter
def creg_accel_cal3_3(self, new_value):
addr = 0x23
self.write_register(addr, new_value)
@property
def creg_accel_bias_x(self):
"""
This register stores a bias term for the accelerometer x-axis for bias calibration. This term can be computed
by performing calibrate accelerometers command within the Redshift labs Serial Interface.
Payload structure:
[31:0] : ACCEL_BIAS_X -- 32-bit IEEE Floating Point Value
:return: ACCEL_BIAS_X as float;
"""
addr = 0x24
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_bias_x, = struct.unpack('>f', payload[0:4])
return reg, accel_bias_x,
@creg_accel_bias_x.setter
def creg_accel_bias_x(self, new_value):
addr = 0x24
self.write_register(addr, new_value)
@property
def creg_accel_bias_y(self):
"""
This register stores a bias term for the accelerometer y-axis for bias calibration. This term can be computed
by performing calibrate accelerometers command within the Redshift labs Serial Interface.
Payload structure:
[31:0] : ACCEL_BIAS_Y -- 32-bit IEEE Floating Point Value
:return: ACCEL_BIAS_Y as float;
"""
addr = 0x25
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_bias_y, = struct.unpack('>f', payload[0:4])
return reg, accel_bias_y,
@creg_accel_bias_y.setter
def creg_accel_bias_y(self, new_value):
addr = 0x25
self.write_register(addr, new_value)
@property
def creg_accel_bias_z(self):
"""
This register stores a bias term for the accelerometer z-axis for bias calibration. This term can be computed
by performing calibrate accelerometers command within the Redshift labs Serial Interface.
Payload structure:
[31:0] : ACCEL_BIAS_Z -- 32-bit IEEE Floating Point Value
:return: ACCEL_BIAS_Z as float;
"""
addr = 0x26
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_bias_z, = struct.unpack('>f', payload[0:4])
return reg, accel_bias_z,
@creg_accel_bias_z.setter
def creg_accel_bias_z(self, new_value):
addr = 0x26
self.write_register(addr, new_value)
@property
def creg_gyro_1_meas_range(self):
"""
The CREG_GYRO_1_MEAS_RANGE register sets the desired measurement range for the gyro 1 sensor. If the rate is
not set, then the default value of 2000 deg/s will be used as a measurement range.
Payload structure:
[4:0] : MEAS_GYRO1 -- Specifies the desired measurement range for the gyro 1 measurements.
:return: MEAS_GYRO1 as bitField;
"""
addr = 0x28
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_1_MEAS_RANGE')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for MEAS_GYRO1 bit field
meas_gyro1_val = (reg.raw_value >> 0) & 0x001F
meas_gyro1_enum = reg.find_field_by(name='MEAS_GYRO1').find_enum_entry_by(value=meas_gyro1_val)
return reg, meas_gyro1_enum
@creg_gyro_1_meas_range.setter
def creg_gyro_1_meas_range(self, new_value):
addr = 0x28
self.write_register(addr, new_value)
@property
def creg_accel_1_meas_range(self):
"""
The CREG_ACCEL_1_MEAS_RANGE register sets the desired measurement range for the accelerometer 1. If the rate
is not set, then the default value of the +-2 g will be used as a measurement range.
Payload structure:
[1:0] : MEAS_ACC1 -- Specifies the desired measurement range for the accelerometer 1 measurements.
:return: MEAS_ACC1 as bitField;
"""
addr = 0x29
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_MEAS_RANGE')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for MEAS_ACC1 bit field
meas_acc1_val = (reg.raw_value >> 0) & 0x0003
meas_acc1_enum = reg.find_field_by(name='MEAS_ACC1').find_enum_entry_by(value=meas_acc1_val)
return reg, meas_acc1_enum
@creg_accel_1_meas_range.setter
def creg_accel_1_meas_range(self, new_value):
addr = 0x29
self.write_register(addr, new_value)
@property
def dreg_health(self):
"""
The health register reports the current status of the GPS module and the other sensors on the board.
Monitoring the health register is the easiest way to monitor the quality of the GPS lock and to watch for
other problems that could affect the behavior of the board.
Payload structure:
[31:26] : SATS_USED -- Reports the number of satellites used in the position solution.
[25:16] : HDOP -- Reports the horizontal dilution of precision (HDOP) reported by the GPS. The actual HDOP value is equal to the contents of the HDOP bits divided by 10.
[15:10] : SATS_IN_VIEW -- Reports the number of satellites in view.
[8] : OVF -- Overflow bit. This bit is set if the UM8 is attempting to transmit data over the serial port faster than is allowed given the baud-rate. If this bit is set, reduce broadcast rates in the COM_RATES registers.
[5] : MG_N -- This bit is set if the sensor detects that the norm of the magnetometer measurement is too far away from 1.0 to be trusted. Usually indicates bad calibration, local field distortions, or both.
[4] : ACC_N -- This bit is set if the sensor detects that the norm of the accelerometer measurement is too far away from 1G to be used (i.e. during aggressive acceleration or high vibration).
[3] : ACCEL -- This bit will be set if the accelerometer fails to initialize on startup.
[2] : GYRO -- This bit will be set if the rate gyro fails to initialize on startup.
[1] : MAG -- This bit will be set if the magnetometer fails to initialize on startup.
[0] : GPS -- This bit is set if the GPS fails to send a packet for more than two seconds. If a GPS packet is ever received, this bit is cleared.
:return: SATS_USED as bitField; HDOP as bitField; SATS_IN_VIEW as bitField; OVF as bitField; MG_N as bitField; ACC_N as bitField; ACCEL as bitField; GYRO as bitField; MAG as bitField; GPS as bitField;
"""
addr = 0x55
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_HEALTH')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for SATS_USED bit field
sats_used_val = (reg.raw_value >> 26) & 0x003F
sats_used_enum = reg.find_field_by(name='SATS_USED').find_enum_entry_by(value=sats_used_val)
# find value for HDOP bit field
hdop_val = (reg.raw_value >> 16) & 0x03FF
hdop_enum = reg.find_field_by(name='HDOP').find_enum_entry_by(value=hdop_val)
# find value for SATS_IN_VIEW bit field
sats_in_view_val = (reg.raw_value >> 10) & 0x003F
sats_in_view_enum = reg.find_field_by(name='SATS_IN_VIEW').find_enum_entry_by(value=sats_in_view_val)
# find value for OVF bit field
ovf_val = (reg.raw_value >> 8) & 0x0001
ovf_enum = reg.find_field_by(name='OVF').find_enum_entry_by(value=ovf_val)
# find value for MG_N bit field
mg_n_val = (reg.raw_value >> 5) & 0x0001
mg_n_enum = reg.find_field_by(name='MG_N').find_enum_entry_by(value=mg_n_val)
# find value for ACC_N bit field
acc_n_val = (reg.raw_value >> 4) & 0x0001
acc_n_enum = reg.find_field_by(name='ACC_N').find_enum_entry_by(value=acc_n_val)
# find value for ACCEL bit field
accel_val = (reg.raw_value >> 3) & 0x0001
accel_enum = reg.find_field_by(name='ACCEL').find_enum_entry_by(value=accel_val)
# find value for GYRO bit field
gyro_val = (reg.raw_value >> 2) & 0x0001
gyro_enum = reg.find_field_by(name='GYRO').find_enum_entry_by(value=gyro_val)
# find value for MAG bit field
mag_val = (reg.raw_value >> 1) & 0x0001
mag_enum = reg.find_field_by(name='MAG').find_enum_entry_by(value=mag_val)
# find value for GPS bit field
gps_val = (reg.raw_value >> 0) & 0x0001
gps_enum = reg.find_field_by(name='GPS').find_enum_entry_by(value=gps_val)
return reg, sats_used_enum, hdop_enum, sats_in_view_enum, ovf_enum, mg_n_enum, acc_n_enum, accel_enum, gyro_enum, mag_enum, gps_enum
@property
def dreg_gyro_raw_x(self):
"""
Contains raw X axis rate gyro data.
Payload structure:
[31:0] : GYRO_RAW_X -- Gyro X (2s complement 16-bit integer)
:return: GYRO_RAW_X as int32_t;
"""
addr = 0x56
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_RAW_X')
reg.raw_value, = struct.unpack('>i', payload[0:4])
gyro_raw_x, = struct.unpack('>i', payload[0:4])
return reg, gyro_raw_x,
@property
def dreg_gyro_raw_y(self):
"""
Contains raw Y axis rate gyro data.
Payload structure:
[31:0] : GYRO_RAW_Y -- Gyro Y (2s complement 16-bit integer)
:return: GYRO_RAW_Y as int32_t;
"""
addr = 0x57
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_RAW_Y')
reg.raw_value, = struct.unpack('>i', payload[0:4])
gyro_raw_y, = struct.unpack('>i', payload[0:4])
return reg, gyro_raw_y,
@property
def dreg_gyro_raw_z(self):
"""
Contains raw Z axis rate gyro data.
Payload structure:
[31:0] : GYRO_RAW_Z -- Gyro Z (2s complement 16-bit integer)
:return: GYRO_RAW_Z as int16_t;
"""
addr = 0x58
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_RAW_Z')
reg.raw_value, = struct.unpack('>h', payload[0:4])
gyro_raw_z, = struct.unpack('>h', payload[0:4])
return reg, gyro_raw_z,
@property
def dreg_gyro_raw_time(self):
"""
Contains time at which the last rate gyro data was acquired.
Payload structure:
[31:0] : GYRO_RAW_TIME -- 32-bit IEEE Floating Point Value
:return: GYRO_RAW_TIME as float;
"""
addr = 0x59
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_raw_time, = struct.unpack('>f', payload[0:4])
return reg, gyro_raw_time,
@property
def dreg_accel_raw_x(self):
"""
Contains raw X axis accelerometer data.
Payload structure:
[31:0] : ACCEL_RAW_X -- Accel X (2s complement 16-bit integer)
:return: ACCEL_RAW_X as int32_t;
"""
addr = 0x5A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_RAW_X')
reg.raw_value, = struct.unpack('>i', payload[0:4])
accel_raw_x, = struct.unpack('>i', payload[0:4])
return reg, accel_raw_x,
@property
def dreg_accel_raw_y(self):
"""
Contains raw Y axis accelerometer data.
Payload structure:
[31:0] : ACCEL_RAW_Y -- Accel Y (2s complement 16-bit integer)
:return: ACCEL_RAW_Y as int32_t;
"""
addr = 0x5B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_RAW_Y')
reg.raw_value, = struct.unpack('>i', payload[0:4])
accel_raw_y, = struct.unpack('>i', payload[0:4])
return reg, accel_raw_y,
@property
def dreg_accel_raw_z(self):
"""
Contains raw Z axis accelerometer data.
Payload structure:
[31:0] : ACCEL_RAW_Z -- Accel Z (2s complement 16-bit integer)
:return: ACCEL_RAW_Z as int32_t;
"""
addr = 0x5C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_RAW_Z')
reg.raw_value, = struct.unpack('>i', payload[0:4])
accel_raw_z, = struct.unpack('>i', payload[0:4])
return reg, accel_raw_z,
@property
def dreg_accel_raw_time(self):
"""
Contains time at which the last raw data sample for the accelerometer was acquired.
Payload structure:
[31:0] : ACCEL_RAW_TIME -- 32-bit IEEE Floating Point Value
:return: ACCEL_RAW_TIME as float;
"""
addr = 0x5D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_raw_time, = struct.unpack('>f', payload[0:4])
return reg, accel_raw_time,
@property
def dreg_mag_raw_x(self):
"""
Contains raw X axis magnetometer data.
Payload structure:
[31:0] : MAG_RAW_X -- Magnetometer X (2s complement 16-bit integer)
:return: MAG_RAW_X as int32_t;
"""
addr = 0x5E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_RAW_X')
reg.raw_value, = struct.unpack('>i', payload[0:4])
mag_raw_x, = struct.unpack('>i', payload[0:4])
return reg, mag_raw_x,
@property
def dreg_mag_raw_y(self):
"""
Contains raw Y axis magnetometer data.
Payload structure:
[31:0] : MAG_RAW_Y -- Magnetometer Y (2s complement 16-bit integer)
:return: MAG_RAW_Y as int32_t;
"""
addr = 0x5F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_RAW_Y')
reg.raw_value, = struct.unpack('>i', payload[0:4])
mag_raw_y, = struct.unpack('>i', payload[0:4])
return reg, mag_raw_y,
@property
def dreg_mag_raw_z(self):
"""
Contains raw Z axis magnetometer data.
Payload structure:
[31:0] : MAG_RAW_Z -- Magnetometer Z (2s complement 16-bit integer)
:return: MAG_RAW_Z as int32_t;
"""
addr = 0x60
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_RAW_Z')
reg.raw_value, = struct.unpack('>i', payload[0:4])
mag_raw_z, = struct.unpack('>i', payload[0:4])
return reg, mag_raw_z,
@property
def dreg_mag_raw_time(self):
"""
Contains time at which the last magnetometer data from the magnetometer was acquired.
Payload structure:
[31:0] : MAG_RAW_TIME -- 32-bit IEEE Floating Point Value
:return: MAG_RAW_TIME as float;
"""
addr = 0x61
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_raw_time, = struct.unpack('>f', payload[0:4])
return reg, mag_raw_time,
@property
def dreg_temperature(self):
"""
Contains the temperature output of the onboard temperature sensor.
Payload structure:
[31:0] : TEMPERATURE -- Temperature in degrees Celcius (32-bit IEEE Floating Point)
:return: TEMPERATURE as float;
"""
addr = 0x62
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_TEMPERATURE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
temperature, = struct.unpack('>f', payload[0:4])
return reg, temperature,
@property
def dreg_temperature_time(self):
"""
Contains time at which the last temperature was acquired.
Payload structure:
[31:0] : TEMPERATURE_TIME -- 32-bit IEEE Floating Point Value
:return: TEMPERATURE_TIME as float;
"""
addr = 0x63
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_TEMPERATURE_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
temperature_time, = struct.unpack('>f', payload[0:4])
return reg, temperature_time,
@property
def dreg_gyro_proc_x(self):
"""
Contains the actual measured angular rate from the gyro for the x axis in degrees/sec after calibration has
been applied.
Payload structure:
[31:0] : GYRO_PROC_X -- Gyro X in degrees / sec (32-bit IEEE Floating Point Value)
:return: GYRO_PROC_X as float;
"""
addr = 0x64
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_PROC_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_proc_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_proc_x,
@property
def dreg_gyro_proc_y(self):
"""
Contains the actual measured angular rate from the gyro for the y axis in degrees/sec after calibration has
been applied.
Payload structure:
[31:0] : GYRO_PROC_Y -- Gyro Y in degrees / sec (32-bit IEEE Floating Point Value)
:return: GYRO_PROC_Y as float;
"""
addr = 0x65
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_PROC_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_proc_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_proc_y,
@property
def dreg_gyro_proc_z(self):
"""
Contains the actual measured angular rate from the gyro for the z axis in degrees/sec after calibration has
been applied.
Payload structure:
[31:0] : GYRO_PROC_Z -- Gyro Z in degrees / sec (32-bit IEEE Floating Point Value)
:return: GYRO_PROC_Z as float;
"""
addr = 0x66
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_PROC_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_proc_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_proc_z,
@property
def dreg_gyro_proc_time(self):
"""
Contains the time at which the last rate gyro data from the gyro was measured.
Payload structure:
[31:0] : GYRO_PROC_TIME -- Gyro time stamp (32-bit IEEE Floating Point Value)
:return: GYRO_PROC_TIME as float;
"""
addr = 0x67
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_PROC_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_proc_time, = struct.unpack('>f', payload[0:4])
return reg, gyro_proc_time,
@property
def dreg_accel_proc_x(self):
"""
Contains the actual measured acceleration from the accelerometer for the x axis in m/s2 after calibration has
been applied.
Payload structure:
[31:0] : ACCEL_PROC_X -- Acceleration X in m/s2 (32-bit IEEE Floating Point Value)
:return: ACCEL_PROC_X as float;
"""
addr = 0x68
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_PROC_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_proc_x, = struct.unpack('>f', payload[0:4])
return reg, accel_proc_x,
@property
def dreg_accel_proc_y(self):
"""
Contains the actual measured acceleration from the accelerometer for the y axis in m/s2 after calibration has
been applied.
Payload structure:
[31:0] : ACCEL_PROC_Y -- Acceleration Y in m/s2 (32-bit IEEE Floating Point Value)
:return: ACCEL_PROC_Y as float;
"""
addr = 0x69
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_PROC_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_proc_y, = struct.unpack('>f', payload[0:4])
return reg, accel_proc_y,
@property
def dreg_accel_proc_z(self):
"""
Contains the actual measured acceleration from the accelerometer for the z axis in m/s2 after calibration has
been applied.
Payload structure:
[31:0] : ACCEL_PROC_Z -- Acceleration Z in m/s2 (32-bit IEEE Floating Point Value)
:return: ACCEL_PROC_Z as float;
"""
addr = 0x6A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_PROC_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_proc_z, = struct.unpack('>f', payload[0:4])
return reg, accel_proc_z,
@property
def dreg_accel_proc_time(self):
"""
Contains the time at which the last acceleration data from the accelerometer was measured.
Payload structure:
[31:0] : ACCEL_PROC_TIME -- Accelerometer time stamp (32-bit IEEE Floating Point Value)
:return: ACCEL_PROC_TIME as float;
"""
addr = 0x6B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_PROC_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_proc_time, = struct.unpack('>f', payload[0:4])
return reg, accel_proc_time,
@property
def dreg_mag_proc_x(self):
"""
Contains the actual measured magnetic field from the magnetometer for the x axis after calibration has been
applied.
Payload structure:
[31:0] : MAG_PROC_X -- Magnetometer X (32-bit IEEE Floating Point Value)
:return: MAG_PROC_X as float;
"""
addr = 0x6C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_PROC_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_proc_x, = struct.unpack('>f', payload[0:4])
return reg, mag_proc_x,
@property
def dreg_mag_proc_y(self):
"""
Contains the actual measured magnetic field from the magnetometer for the y axis after calibration has been
applied.
Payload structure:
[31:0] : MAG_PROC_Y -- Magnetometer Y (32-bit IEEE Floating Point Value)
:return: MAG_PROC_Y as float;
"""
addr = 0x6D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_PROC_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_proc_y, = struct.unpack('>f', payload[0:4])
return reg, mag_proc_y,
@property
def dreg_mag_proc_z(self):
"""
Contains the actual measured magnetic field from the magnetometer for the z axis after calibration has been
applied.
Payload structure:
[31:0] : MAG_PROC_Z -- Magnetometer Z (32-bit IEEE Floating Point Value)
:return: MAG_PROC_Z as float;
"""
addr = 0x6E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_PROC_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_proc_z, = struct.unpack('>f', payload[0:4])
return reg, mag_proc_z,
@property
def dreg_mag_proc_time(self):
"""
Contains the time stamp at which the calibrated magnetometer data was acquired.
Payload structure:
[31:0] : MAG_PROC_TIME -- Magnetometer time stamp (32-bit IEEE Floating Point Value)
:return: MAG_PROC_TIME as float;
"""
addr = 0x6F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_PROC_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_proc_time, = struct.unpack('>f', payload[0:4])
return reg, mag_proc_time,
@property
def dreg_quat_ab(self):
"""
Contains the first two components (a and b) of the estimated quaternion attitude.
Payload structure:
[31:16] : QUAT_A -- First quaternion component. Stored as a 16-bit signed integer. To get the actual value, divide by 29789.09091.
[15:0] : QUAT_B -- Second quaternion component. Stored as a 16-bit signed integer. To get the actual value, divide by 29789.09091.
:return: QUAT_A as int16_t; QUAT_B as int16_t;
"""
addr = 0x70
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_QUAT_AB')
reg.raw_value, = struct.unpack('>I', payload[0:4])
quat_a, quat_b = struct.unpack('>hh', payload[0:4])
return reg, quat_a, quat_b
@property
def dreg_quat_cd(self):
"""
Contains the second two components (c and d) of the estimated quaternion attitude.
Payload structure:
[31:16] : QUAT_C -- Third quaternion component. Stored as a 16-bit signed integer. To get the actual value, divide by 29789.09091.
[15:0] : QUAT_D -- Fourth quaternion component. Stored as a 16-bit signed integer. To get the actual value, divide by 29789.09091.
:return: QUAT_C as int16_t; QUAT_D as int16_t;
"""
addr = 0x71
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_QUAT_CD')
reg.raw_value, = struct.unpack('>I', payload[0:4])
quat_c, quat_d = struct.unpack('>hh', payload[0:4])
return reg, quat_c, quat_d
@property
def dreg_quat_time(self):
"""
Contains the time that the quaternion attitude was estimated.
Payload structure:
[31:0] : QUAT_TIME -- Quaternion time (32-bit IEEE Floating Point Value)
:return: QUAT_TIME as float;
"""
addr = 0x72
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_QUAT_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
quat_time, = struct.unpack('>f', payload[0:4])
return reg, quat_time,
@property
def dreg_euler_phi_theta(self):
"""
Contains the pitch and roll angle estimates.
Payload structure:
[31:16] : PHI -- Roll angle. Stored as a 16-bit signed integer. To get the actual value, divide by 91.02222.
[15:0] : THETA -- Pitch angle. Stored as a 16-bit signed integer. To get the actual value, divide by 91.02222.
:return: PHI as int16_t; THETA as int16_t;
"""
addr = 0x73
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_EULER_PHI_THETA')
reg.raw_value, = struct.unpack('>I', payload[0:4])
phi, theta = struct.unpack('>hh', payload[0:4])
return reg, phi, theta
@property
def dreg_euler_psi(self):
"""
Contains the yaw angle estimate.
Payload structure:
[31:16] : PSI -- Yaw angle. Stored as a 16-bit signed integer. To get the actual value, divide by 91.02222.
:return: PSI as int16_t;
"""
addr = 0x74
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_EULER_PSI')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
psi, = struct.unpack('>hxx', payload[0:4])
return reg, psi,
@property
def dreg_euler_phi_theta_dot(self):
"""
Contains the pitch and roll rate estimates.
Payload structure:
[31:16] : PHI_DOT -- Roll rate. Stored as a 16-bit signed integer. To get the actual value, divide by 16.0.
[15:0] : THETA_DOT -- Pitch rate. Stored as a 16-bit signed integer. To get the actual value, divide by 16.0.
:return: PHI_DOT as int16_t; THETA_DOT as int16_t;
"""
addr = 0x75
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_EULER_PHI_THETA_DOT')
reg.raw_value, = struct.unpack('>I', payload[0:4])
phi_dot, theta_dot = struct.unpack('>hh', payload[0:4])
return reg, phi_dot, theta_dot
@property
def dreg_euler_psi_dot(self):
"""
Contains the yaw rate estimate.
Payload structure:
[31:16] : PSI_DOT -- Yaw rate. Stored as a 16-bit signed integer. To get the actual value, divide by 16.0.
:return: PSI_DOT as int16_t;
"""
addr = 0x76
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_EULER_PSI_DOT')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
psi_dot, = struct.unpack('>hxx', payload[0:4])
return reg, psi_dot,
@property
def dreg_euler_time(self):
"""
Contains the time that the Euler Angles were estimated.
Payload structure:
[31:0] : EULER_TIME -- Euler time (32-bit IEEE Floating Point Value)
:return: EULER_TIME as float;
"""
addr = 0x77
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_EULER_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
euler_time, = struct.unpack('>f', payload[0:4])
return reg, euler_time,
@property
def dreg_position_north(self):
"""
Contains the measured north position in meters from the latitude specified in CREG_HOME_NORTH.
Payload structure:
[31:0] : POSITION_NORTH -- North Position (32-bit IEEE Floating Point Value)
:return: POSITION_NORTH as float;
"""
addr = 0x78
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_POSITION_NORTH')
reg.raw_value, = struct.unpack('>f', payload[0:4])
position_north, = struct.unpack('>f', payload[0:4])
return reg, position_north,
@property
def dreg_position_east(self):
"""
Contains the measured east position in meters from the longitude specified in CREG_HOME_EAST.
Payload structure:
[31:0] : POSITION_EAST -- East Position (32-bit IEEE Floating Point Value)
:return: POSITION_EAST as float;
"""
addr = 0x79
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_POSITION_EAST')
reg.raw_value, = struct.unpack('>f', payload[0:4])
position_east, = struct.unpack('>f', payload[0:4])
return reg, position_east,
@property
def dreg_position_up(self):
"""
Contains the measured altitude in meters from the altitude specified in CREG_HOME_UP.
Payload structure:
[31:0] : POSITION_UP -- Altitude (32-bit IEEE Floating Point Value)
:return: POSITION_UP as float;
"""
addr = 0x7A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_POSITION_UP')
reg.raw_value, = struct.unpack('>f', payload[0:4])
position_up, = struct.unpack('>f', payload[0:4])
return reg, position_up,
@property
def dreg_position_time(self):
"""
Contains the time at which the position was acquired.
Payload structure:
[31:0] : POSITION_TIME -- Position Time (32-bit IEEE Floating Point Value)
:return: POSITION_TIME as float;
"""
addr = 0x7B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_POSITION_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
position_time, = struct.unpack('>f', payload[0:4])
return reg, position_time,
@property
def dreg_velocity_north(self):
"""
Contains the measured north velocity in m/s.
Payload structure:
[31:0] : VELOCITY_NORTH -- North Velocity (32-bit IEEE Floating Point Value)
:return: VELOCITY_NORTH as float;
"""
addr = 0x7C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_NORTH')
reg.raw_value, = struct.unpack('>f', payload[0:4])
velocity_north, = struct.unpack('>f', payload[0:4])
return reg, velocity_north,
@property
def dreg_velocity_east(self):
"""
Contains the measured east velocity in m/s.
Payload structure:
[31:0] : VELOCITY_EAST -- East Velocity (32-bit IEEE Floating Point Value)
:return: VELOCITY_EAST as float;
"""
addr = 0x7D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_EAST')
reg.raw_value, = struct.unpack('>f', payload[0:4])
velocity_east, = struct.unpack('>f', payload[0:4])
return reg, velocity_east,
@property
def dreg_velocity_up(self):
"""
Contains the measured altitude velocity in m/s.
Payload structure:
[31:0] : VELOCITY_UP -- Altitude Velocity (32-bit IEEE Floating Point Value)
:return: VELOCITY_UP as float;
"""
addr = 0x7E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_UP')
reg.raw_value, = struct.unpack('>f', payload[0:4])
velocity_up, = struct.unpack('>f', payload[0:4])
return reg, velocity_up,
@property
def dreg_velocity_time(self):
"""
Contains the time at which the velocity was measured.
Payload structure:
[31:0] : VELOCITY_TIME -- Velocity time (32-bit IEEE Floating Point Value)
:return: VELOCITY_TIME as float;
"""
addr = 0x7F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
velocity_time, = struct.unpack('>f', payload[0:4])
return reg, velocity_time,
@property
def dreg_gps_latitude(self):
"""
Contains the GPS-reported latitude in degrees.
Payload structure:
[31:0] : GPS_LATITUDE -- GPS Latitude (32-bit IEEE Floating Point Value)
:return: GPS_LATITUDE as float;
"""
addr = 0x80
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GPS_LATITUDE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gps_latitude, = struct.unpack('>f', payload[0:4])
return reg, gps_latitude,
@property
def dreg_gps_longitude(self):
"""
Contains the GPS-reported longitude in degrees.
Payload structure:
[31:0] : GPS_LONGITUDE -- GPS Longitude (32-bit IEEE Floating Point Value)
:return: GPS_LONGITUDE as float;
"""
addr = 0x81
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GPS_LONGITUDE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gps_longitude, = struct.unpack('>f', payload[0:4])
return reg, gps_longitude,
@property
def dreg_gps_altitude(self):
"""
Contains the GPS-reported altitude in meters.
Payload structure:
[31:0] : GPS_ALTITUDE -- GPS Altitude (32-bit IEEE Floating Point Value)
:return: GPS_ALTITUDE as float;
"""
addr = 0x82
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GPS_ALTITUDE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gps_altitude, = struct.unpack('>f', payload[0:4])
return reg, gps_altitude,
@property
def dreg_gps_course(self):
"""
Contains the GPS-reported course in degrees.
Payload structure:
[31:0] : GPS_COURSE -- GPS Course (32-bit IEEE Floating Point Value)
:return: GPS_COURSE as float;
"""
addr = 0x83
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GPS_COURSE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gps_course, = struct.unpack('>f', payload[0:4])
return reg, gps_course,
@property
def dreg_gps_speed(self):
"""
Contains the GPS-reported speed in m/s.
Payload structure:
[31:0] : GPS_SPEED -- GPS Speed (32-bit IEEE Floating Point Value)
:return: GPS_SPEED as float;
"""
addr = 0x84
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GPS_SPEED')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gps_speed, = struct.unpack('>f', payload[0:4])
return reg, gps_speed,
@property
def dreg_gps_time(self):
"""
Contains the GPS-reported time in seconds from the last epoch.
Payload structure:
[31:0] : GPS_TIME -- GPS Speed (32-bit IEEE Floating Point Value)
:return: GPS_TIME as float;
"""
addr = 0x85
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GPS_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gps_time, = struct.unpack('>f', payload[0:4])
return reg, gps_time,
@property
def dreg_gps_sat_1_2(self):
"""
Contains satellite ID and signal-to-noise ratio (SNR) for satellites 1 and 2.
Payload structure:
[31:24] : SAT_1_ID -- Satellite 1 ID
[23:16] : SAT_1_SNR -- Signal-to-Noise Ratio of satellite 1 as reported by GPS receiver.
[15:8] : SAT_2_ID -- Satellite 2 ID
[7:0] : SAT_2_SNR -- Signal-to-Noise Ratio of satellite 2 as reported by GPS receiver.
:return: SAT_1_ID as uint8_t; SAT_1_SNR as uint8_t; SAT_2_ID as uint8_t; SAT_2_SNR as uint8_t;
"""
addr = 0x86
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GPS_SAT_1_2')
reg.raw_value, = struct.unpack('>I', payload[0:4])
sat_1_id, sat_1_snr, sat_2_id, sat_2_snr = struct.unpack('>BBBB', payload[0:4])
return reg, sat_1_id, sat_1_snr, sat_2_id, sat_2_snr
@property
def dreg_gps_sat_3_4(self):
"""
Contains satellite ID and signal-to-noise ratio (SNR) for satellites 3 and 4.
Payload structure:
[31:24] : SAT_3_ID -- Satellite 3 ID
[23:16] : SAT_3_SNR -- Signal-to-Noise Ratio of satellite 3 as reported by GPS receiver.
[15:8] : SAT_4_ID -- Satellite 4 ID
[7:0] : SAT_4_SNR -- Signal-to-Noise Ratio of satellite 4 as reported by GPS receiver.
:return: SAT_3_ID as uint8_t; SAT_3_SNR as uint8_t; SAT_4_ID as uint8_t; SAT_4_SNR as uint8_t;
"""
addr = 0x87
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GPS_SAT_3_4')
reg.raw_value, = struct.unpack('>I', payload[0:4])
sat_3_id, sat_3_snr, sat_4_id, sat_4_snr = struct.unpack('>BBBB', payload[0:4])
return reg, sat_3_id, sat_3_snr, sat_4_id, sat_4_snr
@property
def dreg_gps_sat_5_6(self):
"""
Contains satellite ID and signal-to-noise ratio (SNR) for satellites 5 and 6.
Payload structure:
[31:24] : SAT_5_ID -- Satellite 5 ID
[23:16] : SAT_5_SNR -- Signal-to-Noise Ratio of satellite 5 as reported by GPS receiver.
[15:8] : SAT_6_ID -- Satellite 6 ID
[7:0] : SAT_6_SNR -- Signal-to-Noise Ratio of satellite 6 as reported by GPS receiver.
:return: SAT_5_ID as uint8_t; SAT_5_SNR as uint8_t; SAT_6_ID as uint8_t; SAT_6_SNR as uint8_t;
"""
addr = 0x88
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GPS_SAT_5_6')
reg.raw_value, = struct.unpack('>I', payload[0:4])
sat_5_id, sat_5_snr, sat_6_id, sat_6_snr = struct.unpack('>BBBB', payload[0:4])
return reg, sat_5_id, sat_5_snr, sat_6_id, sat_6_snr
@property
def dreg_gps_sat_7_8(self):
"""
Contains satellite ID and signal-to-noise ratio (SNR) for satellites 7 and 8.
Payload structure:
[31:24] : SAT_7_ID -- Satellite 7 ID
[23:16] : SAT_7_SNR -- Signal-to-Noise Ratio of satellite 7 as reported by GPS receiver.
[15:8] : SAT_8_ID -- Satellite 8 ID
[7:0] : SAT_8_SNR -- Signal-to-Noise Ratio of satellite 8 as reported by GPS receiver.
:return: SAT_7_ID as uint8_t; SAT_7_SNR as uint8_t; SAT_8_ID as uint8_t; SAT_8_SNR as uint8_t;
"""
addr = 0x89
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GPS_SAT_7_8')
reg.raw_value, = struct.unpack('>I', payload[0:4])
sat_7_id, sat_7_snr, sat_8_id, sat_8_snr = struct.unpack('>BBBB', payload[0:4])
return reg, sat_7_id, sat_7_snr, sat_8_id, sat_8_snr
@property
def dreg_gps_sat_9_10(self):
"""
Contains satellite ID and signal-to-noise ratio (SNR) for satellites 9 and 10.
Payload structure:
[31:24] : SAT_9_ID -- Satellite 9 ID
[23:16] : SAT_9_SNR -- Signal-to-Noise Ratio of satellite 9 as reported by GPS receiver.
[15:8] : SAT_10_ID -- Satellite 10 ID
[7:0] : SAT_10_SNR -- Signal-to-Noise Ratio of satellite 10 as reported by GPS receiver.
:return: SAT_9_ID as uint8_t; SAT_9_SNR as uint8_t; SAT_10_ID as uint8_t; SAT_10_SNR as uint8_t;
"""
addr = 0x8A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GPS_SAT_9_10')
reg.raw_value, = struct.unpack('>I', payload[0:4])
sat_9_id, sat_9_snr, sat_10_id, sat_10_snr = struct.unpack('>BBBB', payload[0:4])
return reg, sat_9_id, sat_9_snr, sat_10_id, sat_10_snr
@property
def dreg_gps_sat_11_12(self):
"""
Contains satellite ID and signal-to-noise ratio (SNR) for satellites 11 and 12.
Payload structure:
[31:24] : SAT_11_ID -- Satellite 11 ID
[23:16] : SAT_11_SNR -- Signal-to-Noise Ratio of satellite 11 as reported by GPS receiver.
[15:8] : SAT_12_ID -- Satellite 12 ID
[7:0] : SAT_12_SNR -- Signal-to-Noise Ratio of satellite 12 as reported by GPS receiver.
:return: SAT_11_ID as uint8_t; SAT_11_SNR as uint8_t; SAT_12_ID as uint8_t; SAT_12_SNR as uint8_t;
"""
addr = 0x8B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GPS_SAT_11_12')
reg.raw_value, = struct.unpack('>I', payload[0:4])
sat_11_id, sat_11_snr, sat_12_id, sat_12_snr = struct.unpack('>BBBB', payload[0:4])
return reg, sat_11_id, sat_11_snr, sat_12_id, sat_12_snr
@property
def dreg_gyro_bias_x(self):
"""
Contains the estimated x-axis bias for the gyro in degrees/s.
Payload structure:
[31:0] : GYRO_BIAS_X -- Gyro bias X (32-bit IEEE Floating Point Value)
:return: GYRO_BIAS_X as float;
"""
addr = 0x8C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_bias_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_bias_x,
@property
def dreg_gyro_bias_y(self):
"""
Contains the estimated y-axis bias for the gyro in degrees/s.
Payload structure:
[31:0] : GYRO_BIAS_Y -- Gyro bias Y (32-bit IEEE Floating Point Value)
:return: GYRO_BIAS_Y as float;
"""
addr = 0x8D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_bias_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_bias_y,
@property
def dreg_gyro_bias_z(self):
"""
Contains the estimated z-axis bias for the gyro in degrees/s.
Payload structure:
[31:0] : GYRO_BIAS_Z -- Gyro bias Z (32-bit IEEE Floating Point Value)
:return: GYRO_BIAS_Z as float;
"""
addr = 0x8E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_bias_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_bias_z,
@property
def dreg_mag_1_norm(self):
"""
Contains the L2-norm (magnetic norm) for the measured magnetic field from the magnetometer 1 computed over the
calibrated values.
Payload structure:
[31:0] : MAG_1_NORM -- Magnetic norm (32-bit IEEE Floating Point Value)
:return: MAG_1_NORM as float;
"""
addr = 0x8F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_NORM')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_norm, = struct.unpack('>f', payload[0:4])
return reg, mag_1_norm,
@property
def get_fw_revision(self):
"""
Firmware build identification string: a four byte ASCII character sequence which corresponds to a firmware
series.
Payload structure:
[31:0] : FW_REVISION -- Firmware revision string
:return: FW_REVISION as string;
"""
addr = 0xAA
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='GET_FW_REVISION')
reg.raw_value, = struct.unpack('>I', payload[0:4])
fw_revision = struct.unpack('>4s', payload[0:4])[0].decode('utf-8')
return fw_revision
@property
def flash_commit(self):
raise RuntimeError('flash_commit has no getter! The register flash_commit is write-only!')
@flash_commit.setter
def flash_commit(self, new_value):
addr = 0xAB
self.write_register(addr, new_value)
@property
def reset_to_factory(self):
raise RuntimeError('reset_to_factory has no getter! The register reset_to_factory is write-only!')
@reset_to_factory.setter
def reset_to_factory(self, new_value):
addr = 0xAC
self.write_register(addr, new_value)
@property
def zero_gyros(self):
raise RuntimeError('zero_gyros has no getter! The register zero_gyros is write-only!')
@zero_gyros.setter
def zero_gyros(self, new_value):
addr = 0xAD
self.write_register(addr, new_value)
@property
def set_home_position(self):
raise RuntimeError('set_home_position has no getter! The register set_home_position is write-only!')
@set_home_position.setter
def set_home_position(self, new_value):
addr = 0xAE
self.write_register(addr, new_value)
@property
def set_mag_reference(self):
raise RuntimeError('set_mag_reference has no getter! The register set_mag_reference is write-only!')
@set_mag_reference.setter
def set_mag_reference(self, new_value):
addr = 0xB0
self.write_register(addr, new_value)
@property
def calibrate_accelerometers(self):
raise RuntimeError('calibrate_accelerometers has no getter! The register calibrate_accelerometers is write-only!')
@calibrate_accelerometers.setter
def calibrate_accelerometers(self, new_value):
addr = 0xB1
self.write_register(addr, new_value)
@property
def reset_ekf(self):
raise RuntimeError('reset_ekf has no getter! The register reset_ekf is write-only!')
@reset_ekf.setter
def reset_ekf(self, new_value):
addr = 0xB3
self.write_register(addr, new_value)
@property
def hidden_gyro_variance(self):
"""
Gyro variance
Payload structure:
[31:0] : GYRO_VARIANCE -- Gyro variance for EKF
:return: GYRO_VARIANCE as float;
"""
addr = 0x00
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_variance, = struct.unpack('>f', payload[0:4])
return reg, gyro_variance,
@hidden_gyro_variance.setter
def hidden_gyro_variance(self, new_value):
addr = 0x00
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_variance(self):
"""
Accelerometer variance
Payload structure:
[31:0] : ACCEL_VARIANCE -- Accelerometer variance IEEE floating point value.
:return: ACCEL_VARIANCE as float;
"""
addr = 0x01
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_variance, = struct.unpack('>f', payload[0:4])
return reg, accel_variance,
@hidden_accel_variance.setter
def hidden_accel_variance(self, new_value):
addr = 0x01
self.write_register(addr, new_value, hidden=True)
if __name__ == '__main__':
pass | /rsl_comm_py-0.1.11.tar.gz/rsl_comm_py-0.1.11/rsl_comm_py/um8_registers.py | 0.891055 | 0.333008 | um8_registers.py | pypi |
from dataclasses import dataclass
@dataclass
class UM8AllRawPacket:
    """Broadcast packet carrying raw gyro, accelerometer, magnetometer samples and temperature,
    each group with its own timestamp."""
    gyro_raw_x: int    # raw gyro X-axis sample
    gyro_raw_y: int    # raw gyro Y-axis sample
    gyro_raw_z: int    # raw gyro Z-axis sample
    gyro_raw_time: float    # timestamp of the gyro samples
    accel_raw_x: int   # raw accelerometer X-axis sample
    accel_raw_y: int   # raw accelerometer Y-axis sample
    accel_raw_z: int   # raw accelerometer Z-axis sample
    accel_raw_time: float   # timestamp of the accelerometer samples
    mag_raw_x: int     # raw magnetometer X-axis sample
    mag_raw_y: int     # raw magnetometer Y-axis sample
    mag_raw_z: int     # raw magnetometer Z-axis sample
    mag_raw_time: float     # timestamp of the magnetometer samples
    temperature: float      # temperature reading (units per device datasheet -- TODO confirm)
    temperature_time: float # timestamp of the temperature reading
    def __repr__(self):
        # Single-line summary; fixed field widths keep columns aligned in logs.
        return f"RawPacket("\
               f"gyro=[{self.gyro_raw_x:>+5d}, {self.gyro_raw_y:>+5d}, {self.gyro_raw_z:>+5d}], "\
               f"gyro_t={self.gyro_raw_time:>6.3f}; " \
               f"accel=[{self.accel_raw_x:>+5d}, {self.accel_raw_y:>+5d}, {self.accel_raw_z:>+5d}], " \
               f"accel_t={self.accel_raw_time:>6.3f}; " \
               f"mag=[{self.mag_raw_x:>+8d}, {self.mag_raw_y:>+8d}, {self.mag_raw_z:>+8d}], " \
               f"mag_t={self.mag_raw_time:>6.3f}; " \
               f"T={self.temperature:>+3.2f}, " \
               f"T_t={self.temperature_time:>6.3f})"
@dataclass
class UM8AllProcPacket:
    """Broadcast packet carrying processed (calibrated) gyro, accelerometer and magnetometer
    data, each group with its own timestamp."""
    gyro_proc_x: float   # processed gyro X-axis value
    gyro_proc_y: float   # processed gyro Y-axis value
    gyro_proc_z: float   # processed gyro Z-axis value
    gyro_proc_time: float    # timestamp of the gyro values
    accel_proc_x: float  # processed accelerometer X-axis value
    accel_proc_y: float  # processed accelerometer Y-axis value
    accel_proc_z: float  # processed accelerometer Z-axis value
    accel_proc_time: float   # timestamp of the accelerometer values
    mag_proc_x: float    # processed magnetometer X-axis value
    mag_proc_y: float    # processed magnetometer Y-axis value
    mag_proc_z: float    # processed magnetometer Z-axis value
    mag_proc_time: float     # timestamp of the magnetometer values
    def __repr__(self):
        # Single-line summary; fixed field widths keep columns aligned in logs.
        return f"ProcPacket("\
               f"gyro=[{self.gyro_proc_x:>+8.3f}, {self.gyro_proc_y:>+8.3f}, {self.gyro_proc_z:>+8.3f}], "\
               f"gyro_t={self.gyro_proc_time:>6.3f}; " \
               f"accel=[{self.accel_proc_x:>+8.3f}, {self.accel_proc_y:>+8.3f}, {self.accel_proc_z:>+8.3f}], " \
               f"accel_t={self.accel_proc_time:>6.3f}; " \
               f"mag=[{self.mag_proc_x:>+8.6f}, {self.mag_proc_y:>+8.6f}, {self.mag_proc_z:>+8.6f}], " \
               f"mag_t={self.mag_proc_time:>6.3f})"
@dataclass
class UM8EulerPacket:
    """Broadcast packet carrying Euler angles and their rates, with a timestamp."""
    roll: float        # roll angle
    pitch: float       # pitch angle
    yaw: float         # yaw angle
    roll_rate: float   # roll angular rate
    pitch_rate: float  # pitch angular rate
    yaw_rate: float    # yaw angular rate
    time_stamp: float  # timestamp of the attitude estimate
    def __repr__(self):
        # Single-line summary; fixed field widths keep columns aligned in logs.
        return f"EulerPacket("\
               f"roll={self.roll:>+8.3f}; pitch={self.pitch:>+8.3f}; yaw={self.yaw:>+8.3f}; "\
               f"roll_rate={self.roll_rate:>+8.3f}; pitch_rate={self.pitch_rate:>+8.3f}; yaw_rate={self.yaw_rate:>+8.3f}; " \
               f"time_stamp={self.time_stamp:>6.3f})"
@dataclass
class UM8HealthPacket:
    """Broadcast packet wrapping the raw 32-bit health word.

    The ``__repr__`` decodes the bit fields in place:
    SATS_USED [31:26], HDOP [22:16], SATS_IN_VIEW [15:10], OVF [8],
    MG_N [5], ACC_N [4], ACCEL [3], GYRO [2], MAG [1], GPS [0].
    """
    health: int  # raw 32-bit health register value
    def __repr__(self):
        # Decode each bit field from the raw word for a human-readable summary.
        return f"HealthPacket("\
               f"raw_value=0x{self.health:04X} -> " \
               f"SATS_USED={(self.health >> 26) & 0x3F}, " \
               f"HDOP={(self.health >> 16) & 0x7F}, " \
               f"SATS_IN_VIEW={(self.health >> 10) & 0x3F}, " \
               f"OVF={bool((self.health >> 8) & 0x01)}, " \
               f"MG_N={bool((self.health >> 5) & 0x01)}, " \
               f"ACC_N={bool((self.health >> 4) & 0x01)}, " \
               f"ACCEL={bool((self.health >> 3) & 0x01)}, "\
               f"GYRO={bool((self.health >> 2) & 0x01)}, " \
               f"MAG={bool((self.health >> 1) & 0x01)}, " \
               f"GPS={bool((self.health >> 0) & 0x01)})"
@dataclass
class UM8RawAccelPacket:
    """Broadcast packet carrying one raw accelerometer sample with a timestamp."""
    accel_raw_x: int  # raw accelerometer X-axis sample
    accel_raw_y: int  # raw accelerometer Y-axis sample
    accel_raw_z: int  # raw accelerometer Z-axis sample
    accel_raw_time: float  # timestamp of the sample
@dataclass
class UM8RawGyroPacket:
    """Broadcast packet carrying one raw gyro sample with a timestamp."""
    gyro_raw_x: int  # raw gyro X-axis sample
    gyro_raw_y: int  # raw gyro Y-axis sample
    gyro_raw_z: int  # raw gyro Z-axis sample
    gyro_raw_time: float  # timestamp of the sample
@dataclass
class UM8RawMagPacket:
    """Broadcast packet carrying one raw magnetometer sample with a timestamp."""
    mag_raw_x: int  # raw magnetometer X-axis sample
    mag_raw_y: int  # raw magnetometer Y-axis sample
    mag_raw_z: int  # raw magnetometer Z-axis sample
    mag_raw_time: float  # timestamp of the sample
@dataclass
class UM8TemperaturePacket:
    """Broadcast packet carrying one temperature reading with a timestamp."""
    temperature: float       # temperature reading (units per device datasheet -- TODO confirm)
    temperature_time: float  # timestamp of the reading
@dataclass
class UM8ProcAccelPacket:
    """Broadcast packet carrying one processed accelerometer sample with a timestamp."""
    accel_proc_x: float  # processed accelerometer X-axis value
    accel_proc_y: float  # processed accelerometer Y-axis value
    accel_proc_z: float  # processed accelerometer Z-axis value
    accel_proc_time: float  # timestamp of the sample
@dataclass
class UM8ProcGyroPacket:
    """Broadcast packet carrying one processed gyro sample with a timestamp."""
    gyro_proc_x: float  # processed gyro X-axis value
    gyro_proc_y: float  # processed gyro Y-axis value
    gyro_proc_z: float  # processed gyro Z-axis value
    gyro_proc_time: float  # timestamp of the sample
@dataclass
class UM8ProcMagPacket:
    """Broadcast packet carrying one processed magnetometer sample with a timestamp."""
    mag_proc_x: float  # processed magnetometer X-axis value
    mag_proc_y: float  # processed magnetometer Y-axis value
    mag_proc_z: float  # processed magnetometer Z-axis value
    mag_proc_time: float  # timestamp of the sample
@dataclass
class UM8QuaternionPacket:
    """Broadcast packet carrying one attitude quaternion (w, x, y, z) with a timestamp."""
    q_w: float  # quaternion scalar component
    q_x: float  # quaternion x component
    q_y: float  # quaternion y component
    q_z: float  # quaternion z component
    q_time: float  # timestamp of the quaternion estimate
@dataclass
class UM8EulerPosePacket:
    """Broadcast packet combining Euler attitude (with rates) and NEU position,
    each with its own timestamp."""
    roll: float        # roll angle
    pitch: float       # pitch angle
    yaw: float         # yaw angle
    roll_rate: float   # roll angular rate
    pitch_rate: float  # pitch angular rate
    yaw_rate: float    # yaw angular rate
    euler_time: float  # timestamp of the attitude estimate
    position_north: float  # position, north component
    position_east: float   # position, east component
    position_up: float     # position, up component
    position_time: float   # timestamp of the position estimate
@dataclass
class UM8PosePacket:
    """Broadcast packet carrying a NEU position estimate with a timestamp."""
    position_north: float  # position, north component
    position_east: float   # position, east component
    position_up: float     # position, up component
    position_time: float   # timestamp of the estimate
@dataclass
class UM8VelocityPacket:
    """Broadcast packet carrying a NEU velocity estimate with a timestamp."""
    velocity_north: float  # velocity, north component
    velocity_east: float   # velocity, east component
    velocity_up: float     # velocity, up component
    velocity_time: float   # timestamp of the estimate
@dataclass
class UM8GyroBiasPacket:
    """Broadcast packet carrying the estimated gyro bias vector."""
    gyro_bias_x: float  # estimated gyro bias, X axis
    gyro_bias_y: float  # estimated gyro bias, Y axis
    gyro_bias_z: float  # estimated gyro bias, Z axis
    def __repr__(self):
        # Single-line summary; fixed field widths keep columns aligned in logs.
        return f"GyroBiasPacket("\
               f"gyro_bias=[{self.gyro_bias_x:>+8.3f}, {self.gyro_bias_y:>+8.3f}, {self.gyro_bias_z:>+8.3f}])"
if __name__ == '__main__':
pass | /rsl_comm_py-0.1.11.tar.gz/rsl_comm_py-0.1.11/rsl_comm_py/um8_broadcast_packets.py | 0.864411 | 0.429968 | um8_broadcast_packets.py | pypi |
# Author: Dr. Konstantin Selyunin
# License: MIT
# Created: 2020.08.19
import logging
import os.path
import struct
from abc import abstractmethod, ABC
from typing import Union, Tuple
from .rsl_xml_svd.rsl_svd_parser import RslSvdParser
class ShearWaterRegisters(ABC):
    """Register map accessor for the Shearwater board.

    Subclasses implement the transport layer (``connect``, ``read_register``,
    ``write_register``); this base class exposes each device register as a
    Python property decoded with the SVD register description file.
    """
    def __init__(self, **kwargs):
        # Load the register descriptions from the shearwater SVD file shipped with the package.
        self.svd_parser = RslSvdParser(svd_file=ShearWaterRegisters.find_svd('shearwater.svd'))
@staticmethod
def find_svd(svd_file_name: str):
parent_dir = os.path.join(os.path.dirname(__file__), os.pardir)
for root, dirs, files in os.walk(parent_dir):
if svd_file_name in files:
return os.path.join(root, svd_file_name)
    @abstractmethod
    def connect(self, *args, **kwargs):
        """Open the communication channel to the device (implemented by a transport subclass)."""
        pass
    @abstractmethod
    def read_register(self, reg_addr: int, **kw) -> Tuple[bool, bytes]:
        """Read one register; return ``(success_flag, payload_bytes)``."""
        pass
    @abstractmethod
    def write_register(self, reg_addr: int, reg_value: Union[int, bytes, float, str], **kw):
        """Write ``reg_value`` to the register at ``reg_addr`` (implemented by a transport subclass)."""
        pass
@property
def creg_com_settings(self):
"""
The CREG_COM_SETTINGS register is used to set the boards serial port baud rate and to enable (disable) the
automatic transmission of sensor data and estimated states (telemetry).
Payload structure:
[31:28] : BAUD_RATE -- Sets the baud rate of the boards main serial port:
:return: BAUD_RATE as bitField;
"""
addr = 0x00
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_SETTINGS')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for BAUD_RATE bit field
baud_rate_val = (reg.raw_value >> 28) & 0x000F
baud_rate_enum = reg.find_field_by(name='BAUD_RATE').find_enum_entry_by(value=baud_rate_val)
return reg, baud_rate_enum
@creg_com_settings.setter
def creg_com_settings(self, new_value):
addr = 0x00
self.write_register(addr, new_value)
@property
def creg_com_rates1(self):
"""
The CREG_COM_RATES1 register sets desired telemetry transmission rates in Hz for raw accelerometer 1, gyro 1,
gyro 2 and magnetometer 1 data. If the specified rate is 0, then no data is transmitted.
Payload structure:
[31:24] : RAW_ACCEL_1_RATE -- Specifies the desired raw accelerometer 1 data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[23:16] : RAW_GYRO_1_RATE -- Specifies the desired raw gyro 1 data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz
[15:8] : RAW_GYRO_2_RATE -- Specifies the desired raw gyro 2 data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : RAW_MAG_1_RATE -- Specifies the desired raw magnetometer 1 data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: RAW_ACCEL_1_RATE as uint8_t; RAW_GYRO_1_RATE as uint8_t; RAW_GYRO_2_RATE as uint8_t; RAW_MAG_1_RATE as uint8_t;
"""
addr = 0x01
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES1')
reg.raw_value, = struct.unpack('>I', payload[0:4])
raw_accel_1_rate, raw_gyro_1_rate, raw_gyro_2_rate, raw_mag_1_rate = struct.unpack('>BBBB', payload[0:4])
return reg, raw_accel_1_rate, raw_gyro_1_rate, raw_gyro_2_rate, raw_mag_1_rate
@creg_com_rates1.setter
def creg_com_rates1(self, new_value):
addr = 0x01
self.write_register(addr, new_value)
@property
def creg_com_rates2(self):
"""
The CREG_COM_RATES2 register sets desired telemetry transmission rates for the magnetometer 2, all raw data,
and temperature data rate. The ALL_RAW_RATE setting has higher priority over the individual raw sensor data
settings, i.e. whenever this bitfield is set, then the individual raw sensor settings are ignored and not
used. If the specified rate is 0, then no data is transmitted.
Payload structure:
[31:24] : TEMP_RATE -- Specifies the desired broadcast rate for temperature data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[23:16] : RAW_MAG_2_RATE -- Specifies the desired raw magnetometer 2 data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : ALL_RAW_RATE -- Specifies the desired broadcast rate for all raw sensor data. If set, this overrides the broadcast rate setting for individual raw data broadcast rates. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: TEMP_RATE as uint8_t; RAW_MAG_2_RATE as uint8_t; ALL_RAW_RATE as uint8_t;
"""
addr = 0x02
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES2')
reg.raw_value, = struct.unpack('>I', payload[0:4])
temp_rate, raw_mag_2_rate, all_raw_rate = struct.unpack('>BBxB', payload[0:4])
return reg, temp_rate, raw_mag_2_rate, all_raw_rate
@creg_com_rates2.setter
def creg_com_rates2(self, new_value):
addr = 0x02
self.write_register(addr, new_value)
@property
def creg_com_rates3(self):
"""
The CREG_COM_RATES3 register sets desired telemetry transmission rates for processed sensor data for the
sensors: the accelerometer 1, gyro 1, gyro 2, and magnetometer 1. If the specified rate is 0, then no data is
transmitted.
Payload structure:
[31:24] : PROC_ACCEL_1_RATE -- Specifies the desired broadcast rate for processed accelerometer 1 data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[23:16] : PROC_GYRO_1_RATE -- Specifies the desired broadcast rate for processed rate gyro 1 data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[15:8] : PROC_GYRO_2_RATE -- Specifies the desired broadcast rate for processed processed rate gyro 2 data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : PROC_MAG_1_RATE -- Specifies the desired broadcast rate for processed magnetometer 1 data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: PROC_ACCEL_1_RATE as uint8_t; PROC_GYRO_1_RATE as uint8_t; PROC_GYRO_2_RATE as uint8_t; PROC_MAG_1_RATE as uint8_t;
"""
addr = 0x03
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES3')
reg.raw_value, = struct.unpack('>I', payload[0:4])
proc_accel_1_rate, proc_gyro_1_rate, proc_gyro_2_rate, proc_mag_1_rate = struct.unpack('>BBBB', payload[0:4])
return reg, proc_accel_1_rate, proc_gyro_1_rate, proc_gyro_2_rate, proc_mag_1_rate
@creg_com_rates3.setter
def creg_com_rates3(self, new_value):
addr = 0x03
self.write_register(addr, new_value)
@property
def creg_com_rates4(self):
"""
The CREG_COM_RATES4 register defines the desired telemetry transmission rates for the processed data for the
magnetometer 2, and for all processed data. The ALL_PROC_RATE setting has higher priority over the individual
processed sensor data settings, i.e. whenever this bitfield is set, then the individual processed sensor
transmission rate settings are ignored and not used. If the specified rate is 0, then no data is transmitted.
Payload structure:
[31:24] : PROC_MAG_2_RATE -- Specifies the desired broadcast rate for processed magnetometer 2 data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : ALL_PROC_RATE -- Specifies the desired broadcast rate for raw all processed sensor data. If set, this overrides the broadcast rate setting for individual processed data broadcast rates. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: PROC_MAG_2_RATE as uint8_t; ALL_PROC_RATE as uint8_t;
"""
addr = 0x04
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES4')
reg.raw_value, = struct.unpack('>I', payload[0:4])
proc_mag_2_rate, all_proc_rate = struct.unpack('>BxxB', payload[0:4])
return reg, proc_mag_2_rate, all_proc_rate
@creg_com_rates4.setter
def creg_com_rates4(self, new_value):
addr = 0x04
self.write_register(addr, new_value)
@property
def creg_com_rates5(self):
"""
The CREG_COM_RATES5 register sets desired telemetry transmission rates for quaternions, Euler Angles,
position, and velocity estimates. If the specified rate is 0, then no data is transmitted.
Payload structure:
[31:24] : QUAT_RATE -- Specifies the desired broadcast rate for quaternion data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[23:16] : EULER_RATE -- Specifies the desired broadcast rate for Euler Angle data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[15:8] : POSITION_RATE -- Specifies the desired broadcast rate position. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : VELOCITY_RATE -- Specifies the desired broadcast rate for velocity. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: QUAT_RATE as uint8_t; EULER_RATE as uint8_t; POSITION_RATE as uint8_t; VELOCITY_RATE as uint8_t;
"""
addr = 0x05
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES5')
reg.raw_value, = struct.unpack('>I', payload[0:4])
quat_rate, euler_rate, position_rate, velocity_rate = struct.unpack('>BBBB', payload[0:4])
return reg, quat_rate, euler_rate, position_rate, velocity_rate
@creg_com_rates5.setter
def creg_com_rates5(self, new_value):
addr = 0x05
self.write_register(addr, new_value)
@property
def creg_com_rates6(self):
"""
The CREG_COM_RATES6 register sets desired telemetry transmission rates for pose (Euler/position packet),
health, and gyro bias estimates for the gyro 1 and gyro 2. If the specified rate is 0, then no data is
transmitted.
Payload structure:
[31:24] : POSE_RATE -- Specifies the desired broadcast rate for pose (Euler Angle and position) data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[19:16] : HEALTH_RATE -- Specifies the desired broadcast rate for the sensor health packet.
[15:8] : GYRO_BIAS_1_RATE -- Specifies the desired broadcast rate for gyro 1 bias estimates. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : GYRO_BIAS_2_RATE -- Specifies the desired broadcast rate for gyro 2 bias estimates. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: POSE_RATE as uint8_t; HEALTH_RATE as bitField; GYRO_BIAS_1_RATE as uint8_t; GYRO_BIAS_2_RATE as uint8_t;
"""
addr = 0x06
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES6')
reg.raw_value, = struct.unpack('>I', payload[0:4])
pose_rate, gyro_bias_1_rate, gyro_bias_2_rate = struct.unpack('>BxBB', payload[0:4])
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for HEALTH_RATE bit field
health_rate_val = (reg.raw_value >> 16) & 0x000F
health_rate_enum = reg.find_field_by(name='HEALTH_RATE').find_enum_entry_by(value=health_rate_val)
return reg, pose_rate, gyro_bias_1_rate, gyro_bias_2_rate, reg, health_rate_enum
@creg_com_rates6.setter
def creg_com_rates6(self, new_value):
addr = 0x06
self.write_register(addr, new_value)
@property
def creg_com_rates7(self):
"""
The CREG_COM_RATES7 register sets desired telemetry transmission rates in Hz for NMEA packets.
Payload structure:
[31:28] : NMEA_HEALTH_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style health packet.
[27:24] : NMEA_POSE_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style pose (Euler Angle/position) packet.
[23:20] : NMEA_ATTITUDE_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style attitude packet.
[19:16] : NMEA_SENSOR_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style sensor data packet.
[15:12] : NMEA_RATES_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style rate data packet.
[11:8] : NMEA_GPS_POSE_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style GPS pose packet.
[7:4] : NMEA_QUAT_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style quaternion packet.
:return: NMEA_HEALTH_RATE as bitField; NMEA_POSE_RATE as bitField; NMEA_ATTITUDE_RATE as bitField; NMEA_SENSOR_RATE as bitField; NMEA_RATES_RATE as bitField; NMEA_GPS_POSE_RATE as bitField; NMEA_QUAT_RATE as bitField;
"""
addr = 0x07
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES7')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for NMEA_HEALTH_RATE bit field
nmea_health_rate_val = (reg.raw_value >> 28) & 0x000F
nmea_health_rate_enum = reg.find_field_by(name='NMEA_HEALTH_RATE').find_enum_entry_by(value=nmea_health_rate_val)
# find value for NMEA_POSE_RATE bit field
nmea_pose_rate_val = (reg.raw_value >> 24) & 0x000F
nmea_pose_rate_enum = reg.find_field_by(name='NMEA_POSE_RATE').find_enum_entry_by(value=nmea_pose_rate_val)
# find value for NMEA_ATTITUDE_RATE bit field
nmea_attitude_rate_val = (reg.raw_value >> 20) & 0x000F
nmea_attitude_rate_enum = reg.find_field_by(name='NMEA_ATTITUDE_RATE').find_enum_entry_by(value=nmea_attitude_rate_val)
# find value for NMEA_SENSOR_RATE bit field
nmea_sensor_rate_val = (reg.raw_value >> 16) & 0x000F
nmea_sensor_rate_enum = reg.find_field_by(name='NMEA_SENSOR_RATE').find_enum_entry_by(value=nmea_sensor_rate_val)
# find value for NMEA_RATES_RATE bit field
nmea_rates_rate_val = (reg.raw_value >> 12) & 0x000F
nmea_rates_rate_enum = reg.find_field_by(name='NMEA_RATES_RATE').find_enum_entry_by(value=nmea_rates_rate_val)
# find value for NMEA_GPS_POSE_RATE bit field
nmea_gps_pose_rate_val = (reg.raw_value >> 8) & 0x000F
nmea_gps_pose_rate_enum = reg.find_field_by(name='NMEA_GPS_POSE_RATE').find_enum_entry_by(value=nmea_gps_pose_rate_val)
# find value for NMEA_QUAT_RATE bit field
nmea_quat_rate_val = (reg.raw_value >> 4) & 0x000F
nmea_quat_rate_enum = reg.find_field_by(name='NMEA_QUAT_RATE').find_enum_entry_by(value=nmea_quat_rate_val)
return reg, nmea_health_rate_enum, nmea_pose_rate_enum, nmea_attitude_rate_enum, nmea_sensor_rate_enum, nmea_rates_rate_enum, nmea_gps_pose_rate_enum, nmea_quat_rate_enum
@creg_com_rates7.setter
def creg_com_rates7(self, new_value):
addr = 0x07
self.write_register(addr, new_value)
@property
def creg_misc_settings(self):
"""
This register contains miscellaneous filter and sensor control options.
Payload structure:
[8] : PPS -- If set, this bit causes the TX2 pin on the IO Expansion header to be used as the PPS input from an external GPS module. PPS pulses will then be used to synchronize the system clock to UTC time of day.
[3] : ZG -- If set, this bit causes the devicee to attempt to measure the rate gyro bias on startup. The sensor must be stationary on startup for this feature to work properly.
[2] : Q -- If this bit is set, the sensor will run in quaternion mode instead of Euler Angle mode.
[1] : MAG1 -- If set, the magnetometer 1 will be used in state updates.
[0] : MAG2 -- If set, the magnetometer 2 will be used in state updates.
:return: PPS as bitField; ZG as bitField; Q as bitField; MAG1 as bitField; MAG2 as bitField;
"""
addr = 0x08
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MISC_SETTINGS')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for PPS bit field
pps_val = (reg.raw_value >> 8) & 0x0001
pps_enum = reg.find_field_by(name='PPS').find_enum_entry_by(value=pps_val)
# find value for ZG bit field
zg_val = (reg.raw_value >> 3) & 0x0001
zg_enum = reg.find_field_by(name='ZG').find_enum_entry_by(value=zg_val)
# find value for Q bit field
q_val = (reg.raw_value >> 2) & 0x0001
q_enum = reg.find_field_by(name='Q').find_enum_entry_by(value=q_val)
# find value for MAG1 bit field
mag1_val = (reg.raw_value >> 1) & 0x0001
mag1_enum = reg.find_field_by(name='MAG1').find_enum_entry_by(value=mag1_val)
# find value for MAG2 bit field
mag2_val = (reg.raw_value >> 0) & 0x0001
mag2_enum = reg.find_field_by(name='MAG2').find_enum_entry_by(value=mag2_val)
return reg, pps_enum, zg_enum, q_enum, mag1_enum, mag2_enum
@creg_misc_settings.setter
def creg_misc_settings(self, new_value):
addr = 0x08
self.write_register(addr, new_value)
@property
def creg_gyro_1_meas_range(self):
"""
The CREG_GYRO_1_MEAS_RANGE register sets the desired measurement range for the gyro 1 sensor. If the rate is
not set, then the default value of 2000 deg/s will be used as a measurement range.
Payload structure:
[1:0] : MEAS_GYRO1 -- Specifies the desired measurement range for the gyro 1 measurements.
:return: MEAS_GYRO1 as bitField;
"""
addr = 0x09
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_1_MEAS_RANGE')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for MEAS_GYRO1 bit field
meas_gyro1_val = (reg.raw_value >> 0) & 0x0003
meas_gyro1_enum = reg.find_field_by(name='MEAS_GYRO1').find_enum_entry_by(value=meas_gyro1_val)
return reg, meas_gyro1_enum
@creg_gyro_1_meas_range.setter
def creg_gyro_1_meas_range(self, new_value):
addr = 0x09
self.write_register(addr, new_value)
@property
def creg_gyro_1_trim_x(self):
"""
This register sets the x-axis rate gyro 1 trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_1_TRIM_X -- 32-bit IEEE Floating Point Value
:return: GYRO_1_TRIM_X as float;
"""
addr = 0x0A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_1_TRIM_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_trim_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_trim_x,
@creg_gyro_1_trim_x.setter
def creg_gyro_1_trim_x(self, new_value):
addr = 0x0A
self.write_register(addr, new_value)
@property
def creg_gyro_1_trim_y(self):
"""
This register sets the y-axis rate gyro 1 trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_1_TRIM_Y -- 32-bit IEEE Floating Point Value
:return: GYRO_1_TRIM_Y as float;
"""
addr = 0x0B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_1_TRIM_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_trim_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_trim_y,
@creg_gyro_1_trim_y.setter
def creg_gyro_1_trim_y(self, new_value):
addr = 0x0B
self.write_register(addr, new_value)
@property
def creg_gyro_1_trim_z(self):
"""
This register sets the z-axis rate gyro 1 trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_1_TRIM_Z -- 32-bit IEEE Floating Point Value
:return: GYRO_1_TRIM_Z as float;
"""
addr = 0x0C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_1_TRIM_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_trim_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_trim_z,
@creg_gyro_1_trim_z.setter
def creg_gyro_1_trim_z(self, new_value):
addr = 0x0C
self.write_register(addr, new_value)
@property
def creg_gyro_2_meas_range(self):
"""
The CREG_GYRO_2_MEAS_RANGE register sets the desired measurement range for the gyro 2 sensor. If the rate is
not set, then the default value of 2000 deg/s will be used as a measurement range.
Payload structure:
[1:0] : MEAS_GYRO2 -- Specifies the desired measurement range for the gyro 2 measurements.
:return: MEAS_GYRO2 as bitField;
"""
addr = 0x0D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_2_MEAS_RANGE')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for MEAS_GYRO2 bit field
meas_gyro2_val = (reg.raw_value >> 0) & 0x0003
meas_gyro2_enum = reg.find_field_by(name='MEAS_GYRO2').find_enum_entry_by(value=meas_gyro2_val)
return reg, meas_gyro2_enum
@creg_gyro_2_meas_range.setter
def creg_gyro_2_meas_range(self, new_value):
addr = 0x0D
self.write_register(addr, new_value)
@property
def creg_gyro_2_trim_x(self):
"""
This register sets the x-axis rate gyro 2 trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_2_TRIM_X -- 32-bit IEEE Floating Point Value
:return: GYRO_2_TRIM_X as float;
"""
addr = 0x0E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_2_TRIM_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_trim_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_trim_x,
@creg_gyro_2_trim_x.setter
def creg_gyro_2_trim_x(self, new_value):
addr = 0x0E
self.write_register(addr, new_value)
@property
def creg_gyro_2_trim_y(self):
"""
This register sets the y-axis rate gyro 2 trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_2_TRIM_Y -- 32-bit IEEE Floating Point Value
:return: GYRO_2_TRIM_Y as float;
"""
addr = 0x0F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_2_TRIM_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_trim_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_trim_y,
@creg_gyro_2_trim_y.setter
def creg_gyro_2_trim_y(self, new_value):
addr = 0x0F
self.write_register(addr, new_value)
@property
def creg_gyro_2_trim_z(self):
"""
This register sets the z-axis rate gyro 2 trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_2_TRIM_Z -- 32-bit IEEE Floating Point Value
:return: GYRO_2_TRIM_Z as float;
"""
addr = 0x10
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_2_TRIM_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_trim_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_trim_z,
@creg_gyro_2_trim_z.setter
def creg_gyro_2_trim_z(self, new_value):
addr = 0x10
self.write_register(addr, new_value)
@property
def creg_mag_1_cal1_1(self):
"""
Row 1, Column 1 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL1_1 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL1_1 as float;
"""
addr = 0x11
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal1_1, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal1_1,
@creg_mag_1_cal1_1.setter
def creg_mag_1_cal1_1(self, new_value):
addr = 0x11
self.write_register(addr, new_value)
@property
def creg_mag_1_cal1_2(self):
"""
Row 1, Column 2 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL1_2 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL1_2 as float;
"""
addr = 0x12
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal1_2, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal1_2,
@creg_mag_1_cal1_2.setter
def creg_mag_1_cal1_2(self, new_value):
addr = 0x12
self.write_register(addr, new_value)
@property
def creg_mag_1_cal1_3(self):
"""
Row 1, Column 3 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL1_3 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL1_3 as float;
"""
addr = 0x13
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal1_3, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal1_3,
@creg_mag_1_cal1_3.setter
def creg_mag_1_cal1_3(self, new_value):
addr = 0x13
self.write_register(addr, new_value)
@property
def creg_mag_1_cal2_1(self):
"""
Row 2, Column 1 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL2_1 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL2_1 as float;
"""
addr = 0x14
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal2_1, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal2_1,
@creg_mag_1_cal2_1.setter
def creg_mag_1_cal2_1(self, new_value):
addr = 0x14
self.write_register(addr, new_value)
@property
def creg_mag_1_cal2_2(self):
"""
Row 2, Column 2 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL2_2 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL2_2 as float;
"""
addr = 0x15
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal2_2, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal2_2,
@creg_mag_1_cal2_2.setter
def creg_mag_1_cal2_2(self, new_value):
addr = 0x15
self.write_register(addr, new_value)
@property
def creg_mag_1_cal2_3(self):
"""
Row 2, Column 3 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL2_3 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL2_3 as float;
"""
addr = 0x16
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal2_3, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal2_3,
@creg_mag_1_cal2_3.setter
def creg_mag_1_cal2_3(self, new_value):
addr = 0x16
self.write_register(addr, new_value)
@property
def creg_mag_1_cal3_1(self):
"""
Row 3, Column 1 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL3_1 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL3_1 as float;
"""
addr = 0x17
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal3_1, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal3_1,
@creg_mag_1_cal3_1.setter
def creg_mag_1_cal3_1(self, new_value):
addr = 0x17
self.write_register(addr, new_value)
@property
def creg_mag_1_cal3_2(self):
"""
Row 3, Column 2 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL3_2 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL3_2 as float;
"""
addr = 0x18
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal3_2, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal3_2,
@creg_mag_1_cal3_2.setter
def creg_mag_1_cal3_2(self, new_value):
addr = 0x18
self.write_register(addr, new_value)
@property
def creg_mag_1_cal3_3(self):
"""
Row 3, Column 3 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL3_3 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL3_3 as float;
"""
addr = 0x19
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal3_3, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal3_3,
@creg_mag_1_cal3_3.setter
def creg_mag_1_cal3_3(self, new_value):
addr = 0x19
self.write_register(addr, new_value)
@property
def creg_mag_1_bias_x(self):
"""
This register stores a bias term for the magnetometer 1 x-axis for hard-iron calibration. This term can be
computed by performing magnetometer calibration with the Redshift labs Serial Interface.
Payload structure:
[31:0] : MAG_1_BIAS_X -- 32-bit IEEE Floating Point Value
:return: MAG_1_BIAS_X as float;
"""
addr = 0x1A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_bias_x, = struct.unpack('>f', payload[0:4])
return reg, mag_1_bias_x,
@creg_mag_1_bias_x.setter
def creg_mag_1_bias_x(self, new_value):
addr = 0x1A
self.write_register(addr, new_value)
@property
def creg_mag_1_bias_y(self):
"""
This register stores a bias term for the magnetometer 1 y-axis for hard-iron calibration. This term can be
computed by performing magnetometer calibration with the Redshift labs Serial Interface.
Payload structure:
[31:0] : MAG_1_BIAS_Y -- 32-bit IEEE Floating Point Value
:return: MAG_1_BIAS_Y as float;
"""
addr = 0x1B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_bias_y, = struct.unpack('>f', payload[0:4])
return reg, mag_1_bias_y,
@creg_mag_1_bias_y.setter
def creg_mag_1_bias_y(self, new_value):
addr = 0x1B
self.write_register(addr, new_value)
@property
def creg_mag_1_bias_z(self):
"""
This register stores a bias term for the magnetometer 1 z-axis for hard-iron calibration. This term can be
computed by performing magnetometer calibration with the Redshift labs Serial Interface.
Payload structure:
[31:0] : MAG_1_BIAS_Z -- 32-bit IEEE Floating Point Value
:return: MAG_1_BIAS_Z as float;
"""
addr = 0x1C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_bias_z, = struct.unpack('>f', payload[0:4])
return reg, mag_1_bias_z,
@creg_mag_1_bias_z.setter
def creg_mag_1_bias_z(self, new_value):
addr = 0x1C
self.write_register(addr, new_value)
@property
def creg_mag_2_cal1_1(self):
"""
Row 1, Column 1 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL1_1 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL1_1 as float;
"""
addr = 0x1D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal1_1, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal1_1,
@creg_mag_2_cal1_1.setter
def creg_mag_2_cal1_1(self, new_value):
addr = 0x1D
self.write_register(addr, new_value)
@property
def creg_mag_2_cal1_2(self):
"""
Row 1, Column 2 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL1_2 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL1_2 as float;
"""
addr = 0x1E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal1_2, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal1_2,
@creg_mag_2_cal1_2.setter
def creg_mag_2_cal1_2(self, new_value):
addr = 0x1E
self.write_register(addr, new_value)
@property
def creg_mag_2_cal1_3(self):
"""
Row 1, Column 3 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL1_3 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL1_3 as float;
"""
addr = 0x1F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal1_3, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal1_3,
@creg_mag_2_cal1_3.setter
def creg_mag_2_cal1_3(self, new_value):
addr = 0x1F
self.write_register(addr, new_value)
@property
def creg_mag_2_cal2_1(self):
"""
Row 2, Column 1 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL2_1 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL2_1 as float;
"""
addr = 0x20
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal2_1, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal2_1,
@creg_mag_2_cal2_1.setter
def creg_mag_2_cal2_1(self, new_value):
addr = 0x20
self.write_register(addr, new_value)
@property
def creg_mag_2_cal2_2(self):
"""
Row 2, Column 2 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL2_2 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL2_2 as float;
"""
addr = 0x21
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal2_2, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal2_2,
@creg_mag_2_cal2_2.setter
def creg_mag_2_cal2_2(self, new_value):
addr = 0x21
self.write_register(addr, new_value)
@property
def creg_mag_2_cal2_3(self):
"""
Row 2, Column 3 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL2_3 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL2_3 as float;
"""
addr = 0x22
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal2_3, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal2_3,
@creg_mag_2_cal2_3.setter
def creg_mag_2_cal2_3(self, new_value):
addr = 0x22
self.write_register(addr, new_value)
@property
def creg_mag_2_cal3_1(self):
"""
Row 3, Column 1 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL3_1 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL3_1 as float;
"""
addr = 0x23
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal3_1, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal3_1,
@creg_mag_2_cal3_1.setter
def creg_mag_2_cal3_1(self, new_value):
addr = 0x23
self.write_register(addr, new_value)
@property
def creg_mag_2_cal3_2(self):
"""
Row 3, Column 2 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL3_2 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL3_2 as float;
"""
addr = 0x24
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal3_2, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal3_2,
@creg_mag_2_cal3_2.setter
def creg_mag_2_cal3_2(self, new_value):
addr = 0x24
self.write_register(addr, new_value)
@property
def creg_mag_2_cal3_3(self):
"""
Row 3, Column 3 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL3_3 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL3_3 as float;
"""
addr = 0x25
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal3_3, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal3_3,
@creg_mag_2_cal3_3.setter
def creg_mag_2_cal3_3(self, new_value):
addr = 0x25
self.write_register(addr, new_value)
@property
def creg_mag_2_bias_x(self):
"""
This register stores a bias term for the magnetometer 2 x-axis for hard-iron calibration. This term can be
computed by performing magnetometer calibration with the Redshift labs Serial Interface.
Payload structure:
[31:0] : MAG_2_BIAS_X -- 32-bit IEEE Floating Point Value
:return: MAG_2_BIAS_X as float;
"""
addr = 0x26
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_bias_x, = struct.unpack('>f', payload[0:4])
return reg, mag_2_bias_x,
@creg_mag_2_bias_x.setter
def creg_mag_2_bias_x(self, new_value):
addr = 0x26
self.write_register(addr, new_value)
@property
def creg_mag_2_bias_y(self):
"""
This register stores a bias term for the magnetometer 2 y-axis for hard-iron calibration. This term can be
computed by performing magnetometer calibration with the Redshift labs Serial Interface.
Payload structure:
[31:0] : MAG_2_BIAS_Y -- 32-bit IEEE Floating Point Value
:return: MAG_2_BIAS_Y as float;
"""
addr = 0x27
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_bias_y, = struct.unpack('>f', payload[0:4])
return reg, mag_2_bias_y,
@creg_mag_2_bias_y.setter
def creg_mag_2_bias_y(self, new_value):
addr = 0x27
self.write_register(addr, new_value)
@property
def creg_mag_2_bias_z(self):
"""
This register stores a bias term for the magnetometer 2 z-axis for hard-iron calibration. This term can be
computed by performing magnetometer calibration with the Redshift labs Serial Interface.
Payload structure:
[31:0] : MAG_2_BIAS_Z -- 32-bit IEEE Floating Point Value
:return: MAG_2_BIAS_Z as float;
"""
addr = 0x28
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_bias_z, = struct.unpack('>f', payload[0:4])
return reg, mag_2_bias_z,
@creg_mag_2_bias_z.setter
def creg_mag_2_bias_z(self, new_value):
addr = 0x28
self.write_register(addr, new_value)
@property
def creg_accel_1_meas_range(self):
"""
The CREG_ACCEL_1_MEAS_RANGE register sets the desired measurement range for the accelerometer 1. If the rate
is not set, then the default value of the +-2 g will be used as a measurement range.
Payload structure:
[1:0] : MEAS_ACC1 -- Specifies the desired measurement range for the accelerometer 1 measurements.
:return: MEAS_ACC1 as bitField;
"""
addr = 0x29
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_MEAS_RANGE')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for MEAS_ACC1 bit field
meas_acc1_val = (reg.raw_value >> 0) & 0x0003
meas_acc1_enum = reg.find_field_by(name='MEAS_ACC1').find_enum_entry_by(value=meas_acc1_val)
return reg, meas_acc1_enum
@creg_accel_1_meas_range.setter
def creg_accel_1_meas_range(self, new_value):
addr = 0x29
self.write_register(addr, new_value)
@property
def creg_accel_1_cal1_1(self):
"""
Row 1, Column 1 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL1_1 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL1_1 as float;
"""
addr = 0x2A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal1_1, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal1_1,
@creg_accel_1_cal1_1.setter
def creg_accel_1_cal1_1(self, new_value):
addr = 0x2A
self.write_register(addr, new_value)
@property
def creg_accel_1_cal1_2(self):
"""
Row 1, Column 2 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL1_2 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL1_2 as float;
"""
addr = 0x2B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal1_2, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal1_2,
@creg_accel_1_cal1_2.setter
def creg_accel_1_cal1_2(self, new_value):
addr = 0x2B
self.write_register(addr, new_value)
@property
def creg_accel_1_cal1_3(self):
"""
Row 1, Column 3 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL1_3 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL1_3 as float;
"""
addr = 0x2C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal1_3, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal1_3,
@creg_accel_1_cal1_3.setter
def creg_accel_1_cal1_3(self, new_value):
addr = 0x2C
self.write_register(addr, new_value)
@property
def creg_accel_1_cal2_1(self):
"""
Row 2, Column 1 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL2_1 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL2_1 as float;
"""
addr = 0x2D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal2_1, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal2_1,
@creg_accel_1_cal2_1.setter
def creg_accel_1_cal2_1(self, new_value):
addr = 0x2D
self.write_register(addr, new_value)
@property
def creg_accel_1_cal2_2(self):
"""
Row 2, Column 2 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL2_2 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL2_2 as float;
"""
addr = 0x2E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal2_2, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal2_2,
@creg_accel_1_cal2_2.setter
def creg_accel_1_cal2_2(self, new_value):
addr = 0x2E
self.write_register(addr, new_value)
@property
def creg_accel_1_cal2_3(self):
"""
Row 2, Column 3 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL2_3 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL2_3 as float;
"""
addr = 0x2F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal2_3, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal2_3,
@creg_accel_1_cal2_3.setter
def creg_accel_1_cal2_3(self, new_value):
addr = 0x2F
self.write_register(addr, new_value)
@property
def creg_accel_1_cal3_1(self):
"""
Row 3, Column 1 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL3_1 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL3_1 as float;
"""
addr = 0x30
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal3_1, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal3_1,
@creg_accel_1_cal3_1.setter
def creg_accel_1_cal3_1(self, new_value):
addr = 0x30
self.write_register(addr, new_value)
@property
def creg_accel_1_cal3_2(self):
"""
Row 3, Column 2 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL3_2 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL3_2 as float;
"""
addr = 0x31
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal3_2, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal3_2,
@creg_accel_1_cal3_2.setter
def creg_accel_1_cal3_2(self, new_value):
addr = 0x31
self.write_register(addr, new_value)
@property
def creg_accel_1_cal3_3(self):
"""
Row 3, Column 3 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL3_3 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL3_3 as float;
"""
addr = 0x32
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal3_3, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal3_3,
@creg_accel_1_cal3_3.setter
def creg_accel_1_cal3_3(self, new_value):
addr = 0x32
self.write_register(addr, new_value)
@property
def creg_accel_1_bias_x(self):
"""
This register stores a bias term for the accelerometer 1 x-axis for bias calibration. This term can be
computed by performing calibrate accelerometers command within the Redshift labs Serial Interface.
Payload structure:
[31:0] : ACCEL_1_BIAS_X -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_BIAS_X as float;
"""
addr = 0x33
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_bias_x, = struct.unpack('>f', payload[0:4])
return reg, accel_1_bias_x,
@creg_accel_1_bias_x.setter
def creg_accel_1_bias_x(self, new_value):
addr = 0x33
self.write_register(addr, new_value)
@property
def creg_accel_1_bias_y(self):
"""
This register stores a bias term for the accelerometer 1 y-axis for bias calibration. This term can be
computed by performing calibrate accelerometers command within the Redshift labs Serial Interface.
Payload structure:
[31:0] : ACCEL_1_BIAS_Y -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_BIAS_Y as float;
"""
addr = 0x34
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_bias_y, = struct.unpack('>f', payload[0:4])
return reg, accel_1_bias_y,
@creg_accel_1_bias_y.setter
def creg_accel_1_bias_y(self, new_value):
addr = 0x34
self.write_register(addr, new_value)
@property
def creg_accel_1_bias_z(self):
"""
This register stores a bias term for the accelerometer 1 z-axis for bias calibration. This term can be
computed by performing calibrate accelerometers command within the Redshift labs Serial Interface.
Payload structure:
[31:0] : ACCEL_1_BIAS_Z -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_BIAS_Z as float;
"""
addr = 0x35
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_bias_z, = struct.unpack('>f', payload[0:4])
return reg, accel_1_bias_z,
@creg_accel_1_bias_z.setter
def creg_accel_1_bias_z(self, new_value):
addr = 0x35
self.write_register(addr, new_value)
@property
def dreg_health(self):
"""
The health register reports the current status of the sensors on the board. Monitoring the health register is
the easiest way to watch for other problems that could affect the behavior of the board, status of the
sensors. The analogous to the health register, the status of the GPS signal can be monitored in the
DREG_GPS_HEALTH
Payload structure:
[8] : OVF -- Overflow bit. This bit is set if the board is attempting to transmit data over the serial port faster than is allowed given the baud-rate. If this bit is set, reduce broadcast rates in the COM_RATES registers.
[7] : ACC1_N -- This bit is set if the sensor detects that the norm of the accelerometer measurement is too far away from 1G to be used (i.e. during aggressive acceleration or high vibration).
[6] : MAG1_N -- This bit is set if the sensor detects that the norm of the magnetometer measurement for the magnetometer 1 is too far away from 1.0 to be trusted. Usually indicates bad calibration, local field distortions, or both.
[5] : MAG2_N -- This bit is set if the sensor detects that the norm of the magnetometer measurement for the magnetometer 2 is too far away from 1.0 to be trusted. Usually indicates bad calibration, local field distortions, or both.
[4] : ACCEL1 -- This bit will be set if the accelerometer 1 fails to initialize on startup.
[3] : GYRO1 -- This bit will be set if the rate gyro 1 fails to initialize on startup.
[2] : GYRO2 -- This bit will be set if the rate gyro 2 fails to initialize on startup.
[1] : MAG1 -- This bit will be set if the magnetometer 1 fails to initialize on startup.
[0] : MAG2 -- This bit will be set if the magnetometer 2 fails to initialize on startup.
:return: OVF as bitField; ACC1_N as bitField; MAG1_N as bitField; MAG2_N as bitField; ACCEL1 as bitField; GYRO1 as bitField; GYRO2 as bitField; MAG1 as bitField; MAG2 as bitField;
"""
addr = 0x55
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_HEALTH')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for OVF bit field
ovf_val = (reg.raw_value >> 8) & 0x0001
ovf_enum = reg.find_field_by(name='OVF').find_enum_entry_by(value=ovf_val)
# find value for ACC1_N bit field
acc1_n_val = (reg.raw_value >> 7) & 0x0001
acc1_n_enum = reg.find_field_by(name='ACC1_N').find_enum_entry_by(value=acc1_n_val)
# find value for MAG1_N bit field
mag1_n_val = (reg.raw_value >> 6) & 0x0001
mag1_n_enum = reg.find_field_by(name='MAG1_N').find_enum_entry_by(value=mag1_n_val)
# find value for MAG2_N bit field
mag2_n_val = (reg.raw_value >> 5) & 0x0001
mag2_n_enum = reg.find_field_by(name='MAG2_N').find_enum_entry_by(value=mag2_n_val)
# find value for ACCEL1 bit field
accel1_val = (reg.raw_value >> 4) & 0x0001
accel1_enum = reg.find_field_by(name='ACCEL1').find_enum_entry_by(value=accel1_val)
# find value for GYRO1 bit field
gyro1_val = (reg.raw_value >> 3) & 0x0001
gyro1_enum = reg.find_field_by(name='GYRO1').find_enum_entry_by(value=gyro1_val)
# find value for GYRO2 bit field
gyro2_val = (reg.raw_value >> 2) & 0x0001
gyro2_enum = reg.find_field_by(name='GYRO2').find_enum_entry_by(value=gyro2_val)
# find value for MAG1 bit field
mag1_val = (reg.raw_value >> 1) & 0x0001
mag1_enum = reg.find_field_by(name='MAG1').find_enum_entry_by(value=mag1_val)
# find value for MAG2 bit field
mag2_val = (reg.raw_value >> 0) & 0x0001
mag2_enum = reg.find_field_by(name='MAG2').find_enum_entry_by(value=mag2_val)
return reg, ovf_enum, acc1_n_enum, mag1_n_enum, mag2_n_enum, accel1_enum, gyro1_enum, gyro2_enum, mag1_enum, mag2_enum
    # --- Raw rate-gyro data registers, addresses 0x56..0x5B ---
    # Each getter reads one 32-bit register over the bus via read_register();
    # if the read fails ('ok' is falsy) the method falls through and
    # implicitly returns None.
    @property
    def dreg_gyro_1_raw_xy(self):
        """
        Contains raw X and Y axis rate gyro 1 data.
        Payload structure:
        [31:16] : GYRO_1_RAW_X -- Gyro X (2s complement 16-bit integer)
        [15:0] : GYRO_1_RAW_Y -- Gyro Y (2s complement 16-bit integer)
        :return: GYRO_1_RAW_X as int16_t; GYRO_1_RAW_Y as int16_t;
        """
        addr = 0x56
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_RAW_XY')
            # raw_value keeps the whole 32-bit word; the same four bytes are
            # then re-interpreted as two big-endian signed 16-bit integers
            reg.raw_value, = struct.unpack('>I', payload[0:4])
            gyro_1_raw_x, gyro_1_raw_y = struct.unpack('>hh', payload[0:4])
            return reg, gyro_1_raw_x, gyro_1_raw_y
    @property
    def dreg_gyro_1_raw_z(self):
        """
        Contains raw Z axis rate gyro 1 data.
        Payload structure:
        [31:16] : GYRO_1_RAW_Z -- Gyro Z (2s complement 16-bit integer)
        :return: GYRO_1_RAW_Z as int16_t;
        """
        addr = 0x57
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_RAW_Z')
            # '>hxx': signed 16-bit value in the two high bytes, two pad bytes skipped.
            # NOTE(review): raw_value here holds the decoded int16, not the full
            # 32-bit register word as in the XY registers -- confirm intended.
            reg.raw_value, = struct.unpack('>hxx', payload[0:4])
            gyro_1_raw_z, = struct.unpack('>hxx', payload[0:4])
            return reg, gyro_1_raw_z,
    @property
    def dreg_gyro_1_raw_time(self):
        """
        Contains time at which the last rate gyro 1 data was acquired.
        Payload structure:
        [31:0] : GYRO_1_RAW_TIME -- 32-bit IEEE Floating Point Value
        :return: GYRO_1_RAW_TIME as float;
        """
        addr = 0x58
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_RAW_TIME')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            gyro_1_raw_time, = struct.unpack('>f', payload[0:4])
            return reg, gyro_1_raw_time,
    @property
    def dreg_gyro_2_raw_xy(self):
        """
        Contains raw X and Y axis rate gyro 2 data.
        Payload structure:
        [31:16] : GYRO_2_RAW_X -- Gyro X (2s complement 16-bit integer)
        [15:0] : GYRO_2_RAW_Y -- Gyro Y (2s complement 16-bit integer)
        :return: GYRO_2_RAW_X as int16_t; GYRO_2_RAW_Y as int16_t;
        """
        addr = 0x59
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_RAW_XY')
            reg.raw_value, = struct.unpack('>I', payload[0:4])
            gyro_2_raw_x, gyro_2_raw_y = struct.unpack('>hh', payload[0:4])
            return reg, gyro_2_raw_x, gyro_2_raw_y
    @property
    def dreg_gyro_2_raw_z(self):
        """
        Contains raw Z axis rate gyro 2 data.
        Payload structure:
        [31:16] : GYRO_2_RAW_Z -- Gyro Z (2s complement 16-bit integer)
        :return: GYRO_2_RAW_Z as int16_t;
        """
        addr = 0x5A
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_RAW_Z')
            reg.raw_value, = struct.unpack('>hxx', payload[0:4])
            gyro_2_raw_z, = struct.unpack('>hxx', payload[0:4])
            return reg, gyro_2_raw_z,
    @property
    def dreg_gyro_2_raw_time(self):
        """
        Contains time at which the last rate gyro 2 data was acquired.
        Payload structure:
        [31:0] : GYRO_2_RAW_TIME -- 32-bit IEEE Floating Point Value
        :return: GYRO_2_RAW_TIME as float;
        """
        addr = 0x5B
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_RAW_TIME')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            gyro_2_raw_time, = struct.unpack('>f', payload[0:4])
            return reg, gyro_2_raw_time,
    # --- Raw accelerometer 1 data registers, addresses 0x5C..0x5E ---
    # All getters implicitly return None when the bus read fails.
    @property
    def dreg_accel_1_raw_xy(self):
        """
        Contains raw X and Y axis accelerometer 1 data.
        Payload structure:
        [31:16] : ACCEL_1_RAW_X -- Accel X (2s complement 16-bit integer)
        [15:0] : ACCEL_1_RAW_Y -- Accel Y (2s complement 16-bit integer)
        :return: ACCEL_1_RAW_X as int16_t; ACCEL_1_RAW_Y as int16_t;
        """
        addr = 0x5C
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_RAW_XY')
            # full 32-bit word kept as raw_value, then split into two int16
            reg.raw_value, = struct.unpack('>I', payload[0:4])
            accel_1_raw_x, accel_1_raw_y = struct.unpack('>hh', payload[0:4])
            return reg, accel_1_raw_x, accel_1_raw_y
    @property
    def dreg_accel_1_raw_z(self):
        """
        Contains raw Z axis accelerometer 1 data.
        Payload structure:
        [31:16] : ACCEL_1_RAW_Z -- Accel Z (2s complement 16-bit integer)
        :return: ACCEL_1_RAW_Z as int16_t;
        """
        addr = 0x5D
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_RAW_Z')
            # '>hxx': int16 in the two high bytes, two pad bytes skipped
            reg.raw_value, = struct.unpack('>hxx', payload[0:4])
            accel_1_raw_z, = struct.unpack('>hxx', payload[0:4])
            return reg, accel_1_raw_z,
    @property
    def dreg_accel_1_raw_time(self):
        """
        Contains time at which the last raw data sample for the accelerometer 1 was acquired.
        Payload structure:
        [31:0] : ACCEL_1_RAW_TIME -- 32-bit IEEE Floating Point Value
        :return: ACCEL_1_RAW_TIME as float;
        """
        addr = 0x5E
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_RAW_TIME')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            accel_1_raw_time, = struct.unpack('>f', payload[0:4])
            return reg, accel_1_raw_time,
    # --- Raw magnetometer 1 data registers, addresses 0x5F..0x62 ---
    # Unlike the 16-bit gyro/accel channels, magnetometer 1 raw axes are full
    # 32-bit signed integers ('>i'); getters implicitly return None on a
    # failed bus read.
    @property
    def dreg_mag_1_raw_x(self):
        """
        Contains raw x axis magnetometer 1 data.
        Payload structure:
        [31:0] : MAG_1_RAW_X -- 32-bit signed integer value
        :return: MAG_1_RAW_X as int32_t;
        """
        addr = 0x5F
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_MAG_1_RAW_X')
            reg.raw_value, = struct.unpack('>i', payload[0:4])
            mag_1_raw_x, = struct.unpack('>i', payload[0:4])
            return reg, mag_1_raw_x,
    @property
    def dreg_mag_1_raw_y(self):
        """
        Contains raw y axis magnetometer 1 data.
        Payload structure:
        [31:0] : MAG_1_RAW_Y -- 32-bit signed integer value
        :return: MAG_1_RAW_Y as int32_t;
        """
        addr = 0x60
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_MAG_1_RAW_Y')
            reg.raw_value, = struct.unpack('>i', payload[0:4])
            mag_1_raw_y, = struct.unpack('>i', payload[0:4])
            return reg, mag_1_raw_y,
    @property
    def dreg_mag_1_raw_z(self):
        """
        Contains raw z axis magnetometer 1 data.
        Payload structure:
        [31:0] : MAG_1_RAW_Z -- 32-bit signed integer value
        :return: MAG_1_RAW_Z as int32_t;
        """
        addr = 0x61
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_MAG_1_RAW_Z')
            reg.raw_value, = struct.unpack('>i', payload[0:4])
            mag_1_raw_z, = struct.unpack('>i', payload[0:4])
            return reg, mag_1_raw_z,
    @property
    def dreg_mag_1_raw_time(self):
        """
        Contains time at which the last magnetometer data from the magnetometer 1 was acquired.
        Payload structure:
        [31:0] : MAG_1_RAW_TIME -- 32-bit IEEE Floating Point Value
        :return: MAG_1_RAW_TIME as float;
        """
        addr = 0x62
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_MAG_1_RAW_TIME')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            mag_1_raw_time, = struct.unpack('>f', payload[0:4])
            return reg, mag_1_raw_time,
    # --- Raw magnetometer 2 data registers, addresses 0x63..0x65 ---
    # Magnetometer 2 raw axes are packed as 16-bit values (two per register),
    # unlike magnetometer 1's 32-bit-per-axis layout; getters implicitly
    # return None on a failed bus read.
    @property
    def dreg_mag_2_raw_xy(self):
        """
        Contains raw X and Y axis magnetometer 2 data.
        Payload structure:
        [31:16] : MAG_2_RAW_X -- Magnetometer X (2s complement 16-bit integer)
        [15:0] : MAG_2_RAW_Y -- Magnetometer Y (2s complement 16-bit integer)
        :return: MAG_2_RAW_X as int16_t; MAG_2_RAW_Y as int16_t;
        """
        addr = 0x63
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_MAG_2_RAW_XY')
            reg.raw_value, = struct.unpack('>I', payload[0:4])
            mag_2_raw_x, mag_2_raw_y = struct.unpack('>hh', payload[0:4])
            return reg, mag_2_raw_x, mag_2_raw_y
    @property
    def dreg_mag_2_raw_z(self):
        """
        Contains raw Z axis magnetometer 2 data.
        Payload structure:
        [31:16] : MAG_2_RAW_Z -- Magnetometer Z (2s complement 16-bit integer)
        :return: MAG_2_RAW_Z as int16_t;
        """
        addr = 0x64
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_MAG_2_RAW_Z')
            # '>hxx': int16 in the two high bytes, two pad bytes skipped
            reg.raw_value, = struct.unpack('>hxx', payload[0:4])
            mag_2_raw_z, = struct.unpack('>hxx', payload[0:4])
            return reg, mag_2_raw_z,
    @property
    def dreg_mag_2_raw_time(self):
        """
        Contains time at which the last magnetometer data from the magnetometer 2 was acquired.
        Payload structure:
        [31:0] : MAG_2_RAW_TIME -- 32-bit IEEE Floating Point Value
        :return: MAG_2_RAW_TIME as float;
        """
        addr = 0x65
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_MAG_2_RAW_TIME')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            mag_2_raw_time, = struct.unpack('>f', payload[0:4])
            return reg, mag_2_raw_time,
@property
def dreg_temperature(self):
"""
Contains the temperature output of the onboard temperature sensor.
Payload structure:
[31:0] : TEMPERATURE -- Temperature in degrees Celcius (32-bit IEEE Floating Point)
:return: TEMPERATURE as float;
"""
addr = 0x66
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_TEMPERATURE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
temperature, = struct.unpack('>f', payload[0:4])
return reg, temperature,
@property
def dreg_temperature_time(self):
"""
Contains time at which the last temperature was acquired.
Payload structure:
[31:0] : TEMPERATURE_TIME -- 32-bit IEEE Floating Point Value
:return: TEMPERATURE_TIME as float;
"""
addr = 0x67
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_TEMPERATURE_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
temperature_time, = struct.unpack('>f', payload[0:4])
return reg, temperature_time,
    # --- Calibrated (processed) rate-gyro registers, addresses 0x68..0x6F ---
    # Each register is one 32-bit IEEE-754 float; getters implicitly return
    # None when the bus read fails.
    @property
    def dreg_gyro_1_proc_x(self):
        """
        Contains the actual measured angular rate from the gyro 1 for the x axis in degrees/sec after calibration has
        been applied.
        Payload structure:
        [31:0] : GYRO_1_PROC_X -- Gyro X in degrees / sec (32-bit IEEE Floating Point Value)
        :return: GYRO_1_PROC_X as float;
        """
        addr = 0x68
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_PROC_X')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            gyro_1_proc_x, = struct.unpack('>f', payload[0:4])
            return reg, gyro_1_proc_x,
    @property
    def dreg_gyro_1_proc_y(self):
        """
        Contains the actual measured angular rate from the gyro 1 for the y axis in degrees/sec after calibration has
        been applied.
        Payload structure:
        [31:0] : GYRO_1_PROC_Y -- Gyro Y in degrees / sec (32-bit IEEE Floating Point Value)
        :return: GYRO_1_PROC_Y as float;
        """
        addr = 0x69
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_PROC_Y')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            gyro_1_proc_y, = struct.unpack('>f', payload[0:4])
            return reg, gyro_1_proc_y,
    @property
    def dreg_gyro_1_proc_z(self):
        """
        Contains the actual measured angular rate from the gyro 1 for the z axis in degrees/sec after calibration has
        been applied.
        Payload structure:
        [31:0] : GYRO_1_PROC_Z -- Gyro Z in degrees / sec (32-bit IEEE Floating Point Value)
        :return: GYRO_1_PROC_Z as float;
        """
        addr = 0x6A
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_PROC_Z')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            gyro_1_proc_z, = struct.unpack('>f', payload[0:4])
            return reg, gyro_1_proc_z,
    @property
    def dreg_gyro_1_proc_time(self):
        """
        Contains the time at which the last rate gyro data from the gyro 1 was measured.
        Payload structure:
        [31:0] : GYRO_1_PROC_TIME -- Gyro 1 time stamp (32-bit IEEE Floating Point Value)
        :return: GYRO_1_PROC_TIME as float;
        """
        addr = 0x6B
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_PROC_TIME')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            gyro_1_proc_time, = struct.unpack('>f', payload[0:4])
            return reg, gyro_1_proc_time,
    @property
    def dreg_gyro_2_proc_x(self):
        """
        Contains the actual measured angular rate from the gyro 2 for the x axis in degrees/sec after calibration has
        been applied.
        Payload structure:
        [31:0] : GYRO_2_PROC_X -- Gyro X in degrees / sec (32-bit IEEE Floating Point Value)
        :return: GYRO_2_PROC_X as float;
        """
        addr = 0x6C
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_PROC_X')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            gyro_2_proc_x, = struct.unpack('>f', payload[0:4])
            return reg, gyro_2_proc_x,
    @property
    def dreg_gyro_2_proc_y(self):
        """
        Contains the actual measured angular rate from the gyro 2 for the y axis in degrees/sec after calibration has
        been applied.
        Payload structure:
        [31:0] : GYRO_2_PROC_Y -- Gyro Y in degrees / sec (32-bit IEEE Floating Point Value)
        :return: GYRO_2_PROC_Y as float;
        """
        addr = 0x6D
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_PROC_Y')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            gyro_2_proc_y, = struct.unpack('>f', payload[0:4])
            return reg, gyro_2_proc_y,
    @property
    def dreg_gyro_2_proc_z(self):
        """
        Contains the actual measured angular rate from the gyro 2 for the z axis in degrees/sec after calibration has
        been applied.
        Payload structure:
        [31:0] : GYRO_2_PROC_Z -- Gyro Z in degrees / sec (32-bit IEEE Floating Point Value)
        :return: GYRO_2_PROC_Z as float;
        """
        addr = 0x6E
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_PROC_Z')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            gyro_2_proc_z, = struct.unpack('>f', payload[0:4])
            return reg, gyro_2_proc_z,
    @property
    def dreg_gyro_2_proc_time(self):
        """
        Contains the time at which the last rate gyro data from the gyro 2 was measured.
        Payload structure:
        [31:0] : GYRO_2_PROC_TIME -- Gyro 2 time stamp (32-bit IEEE Floating Point Value)
        :return: GYRO_2_PROC_TIME as float;
        """
        addr = 0x6F
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_PROC_TIME')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            gyro_2_proc_time, = struct.unpack('>f', payload[0:4])
            return reg, gyro_2_proc_time,
    # --- Calibrated (processed) accelerometer 1 registers, addresses 0x70..0x73 ---
    # One 32-bit IEEE-754 float per register; getters implicitly return None
    # when the bus read fails.
    @property
    def dreg_accel_1_proc_x(self):
        """
        Contains the actual measured acceleration from the accelerometer 1 for the x axis in m/s2 after calibration
        has been applied.
        Payload structure:
        [31:0] : ACCEL_1_PROC_X -- Acceleration X in m/s2 (32-bit IEEE Floating Point Value)
        :return: ACCEL_1_PROC_X as float;
        """
        addr = 0x70
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_PROC_X')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            accel_1_proc_x, = struct.unpack('>f', payload[0:4])
            return reg, accel_1_proc_x,
    @property
    def dreg_accel_1_proc_y(self):
        """
        Contains the actual measured acceleration from the accelerometer 1 for the y axis in m/s2 after calibration
        has been applied.
        Payload structure:
        [31:0] : ACCEL_1_PROC_Y -- Acceleration Y in m/s2 (32-bit IEEE Floating Point Value)
        :return: ACCEL_1_PROC_Y as float;
        """
        addr = 0x71
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_PROC_Y')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            accel_1_proc_y, = struct.unpack('>f', payload[0:4])
            return reg, accel_1_proc_y,
    @property
    def dreg_accel_1_proc_z(self):
        """
        Contains the actual measured acceleration from the accelerometer 1 for the z axis in m/s2 after calibration
        has been applied.
        Payload structure:
        [31:0] : ACCEL_1_PROC_Z -- Acceleration Z in m/s2 (32-bit IEEE Floating Point Value)
        :return: ACCEL_1_PROC_Z as float;
        """
        addr = 0x72
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_PROC_Z')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            accel_1_proc_z, = struct.unpack('>f', payload[0:4])
            return reg, accel_1_proc_z,
    @property
    def dreg_accel_1_proc_time(self):
        """
        Contains the time at which the last acceleration data from the accelerometer 1 was measured.
        Payload structure:
        [31:0] : ACCEL_1_PROC_TIME -- Accelerometer 1 time stamp (32-bit IEEE Floating Point Value)
        :return: ACCEL_1_PROC_TIME as float;
        """
        addr = 0x73
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_PROC_TIME')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            accel_1_proc_time, = struct.unpack('>f', payload[0:4])
            return reg, accel_1_proc_time,
    # --- Calibrated (processed) magnetometer 1 registers, addresses 0x74..0x78 ---
    # One 32-bit IEEE-754 float per register (axes in mT plus the L2 norm and a
    # time stamp); getters implicitly return None when the bus read fails.
    @property
    def dreg_mag_1_proc_x(self):
        """
        Contains the actual measured magnetic field from the magnetometer 1 for the x axis in mT after calibration has
        been applied.
        Payload structure:
        [31:0] : MAG_1_PROC_X -- Magnetometer X in mT (32-bit IEEE Floating Point Value)
        :return: MAG_1_PROC_X as float;
        """
        addr = 0x74
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_MAG_1_PROC_X')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            mag_1_proc_x, = struct.unpack('>f', payload[0:4])
            return reg, mag_1_proc_x,
    @property
    def dreg_mag_1_proc_y(self):
        """
        Contains the actual measured magnetic field from the magnetometer 1 for the y axis in mT after calibration has
        been applied.
        Payload structure:
        [31:0] : MAG_1_PROC_Y -- Magnetometer Y in mT (32-bit IEEE Floating Point Value)
        :return: MAG_1_PROC_Y as float;
        """
        addr = 0x75
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_MAG_1_PROC_Y')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            mag_1_proc_y, = struct.unpack('>f', payload[0:4])
            return reg, mag_1_proc_y,
    @property
    def dreg_mag_1_proc_z(self):
        """
        Contains the actual measured magnetic field from the magnetometer 1 for the z axis in mT after calibration has
        been applied.
        Payload structure:
        [31:0] : MAG_1_PROC_Z -- Magnetometer Z in mT (32-bit IEEE Floating Point Value)
        :return: MAG_1_PROC_Z as float;
        """
        addr = 0x76
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_MAG_1_PROC_Z')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            mag_1_proc_z, = struct.unpack('>f', payload[0:4])
            return reg, mag_1_proc_z,
    @property
    def dreg_mag_1_norm(self):
        """
        Contains the L2-norm (magnetic norm) for the measured magnetic field from the magnetometer 1 computed over the
        calibrated values.
        Payload structure:
        [31:0] : MAG_1_NORM -- Magnetic norm (32-bit IEEE Floating Point Value)
        :return: MAG_1_NORM as float;
        """
        addr = 0x77
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_MAG_1_NORM')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            mag_1_norm, = struct.unpack('>f', payload[0:4])
            return reg, mag_1_norm,
    @property
    def dreg_mag_1_proc_time(self):
        """
        Contains the time stamp at which the calibrated magnetometer 1 data was acquired.
        Payload structure:
        [31:0] : MAG_1_PROC_TIME -- Magnetometer 1 time stamp (32-bit IEEE Floating Point Value)
        :return: MAG_1_PROC_TIME as float;
        """
        addr = 0x78
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_MAG_1_PROC_TIME')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            mag_1_proc_time, = struct.unpack('>f', payload[0:4])
            return reg, mag_1_proc_time,
    # --- Calibrated (processed) magnetometer 2 registers, addresses 0x79..0x7D ---
    # Same layout as the magnetometer 1 processed block; getters implicitly
    # return None when the bus read fails.
    @property
    def dreg_mag_2_proc_x(self):
        """
        Contains the actual measured magnetic field from the magnetometer 2 for the x axis in mT after calibration has
        been applied.
        Payload structure:
        [31:0] : MAG_2_PROC_X -- Magnetometer X in mT (32-bit IEEE Floating Point Value)
        :return: MAG_2_PROC_X as float;
        """
        addr = 0x79
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_MAG_2_PROC_X')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            mag_2_proc_x, = struct.unpack('>f', payload[0:4])
            return reg, mag_2_proc_x,
    @property
    def dreg_mag_2_proc_y(self):
        """
        Contains the actual measured magnetic field from the magnetometer 2 for the y axis in mT after calibration has
        been applied.
        Payload structure:
        [31:0] : MAG_2_PROC_Y -- Magnetometer Y in mT (32-bit IEEE Floating Point Value)
        :return: MAG_2_PROC_Y as float;
        """
        addr = 0x7A
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_MAG_2_PROC_Y')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            mag_2_proc_y, = struct.unpack('>f', payload[0:4])
            return reg, mag_2_proc_y,
    @property
    def dreg_mag_2_proc_z(self):
        """
        Contains the actual measured magnetic field from the magnetometer 2 for the z axis in mT after calibration has
        been applied.
        Payload structure:
        [31:0] : MAG_2_PROC_Z -- Magnetometer Z in mT (32-bit IEEE Floating Point Value)
        :return: MAG_2_PROC_Z as float;
        """
        addr = 0x7B
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_MAG_2_PROC_Z')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            mag_2_proc_z, = struct.unpack('>f', payload[0:4])
            return reg, mag_2_proc_z,
    @property
    def dreg_mag_2_norm(self):
        """
        Contains the L2-norm (magnetic norm) for the measured magnetic field from the magnetometer 2 computed over the
        calibrated values.
        Payload structure:
        [31:0] : MAG_2_NORM -- Magnetic norm (32-bit IEEE Floating Point Value)
        :return: MAG_2_NORM as float;
        """
        addr = 0x7C
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_MAG_2_NORM')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            mag_2_norm, = struct.unpack('>f', payload[0:4])
            return reg, mag_2_norm,
    @property
    def dreg_mag_2_proc_time(self):
        """
        Contains the time stamp at which the calibrated magnetometer 2 data was acquired.
        Payload structure:
        [31:0] : MAG_2_PROC_TIME -- Magnetometer 2 time stamp (32-bit IEEE Floating Point Value)
        :return: MAG_2_PROC_TIME as float;
        """
        addr = 0x7D
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_MAG_2_PROC_TIME')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            mag_2_proc_time, = struct.unpack('>f', payload[0:4])
            return reg, mag_2_proc_time,
    # --- Quaternion attitude registers, addresses 0x7E..0x80 ---
    # Components are fixed-point int16 (caller divides by 29789.09091 per the
    # docstrings); getters implicitly return None when the bus read fails.
    @property
    def dreg_quat_ab(self):
        """
        Contains the first two components (a and b) of the estimated quaternion attitude.
        Payload structure:
        [31:16] : QUAT_A -- First quaternion component. Stored as a 16-bit signed integer. To get the actual value, divide by 29789.09091.
        [15:0] : QUAT_B -- Second quaternion component. Stored as a 16-bit signed integer. To get the actual value, divide by 29789.09091.
        :return: QUAT_A as int16_t; QUAT_B as int16_t;
        """
        addr = 0x7E
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_QUAT_AB')
            # keep the full word, then split into the two int16 components;
            # the returned values are the raw fixed-point integers (not scaled)
            reg.raw_value, = struct.unpack('>I', payload[0:4])
            quat_a, quat_b = struct.unpack('>hh', payload[0:4])
            return reg, quat_a, quat_b
    @property
    def dreg_quat_cd(self):
        """
        Contains the second two components (c and d) of the estimated quaternion attitude.
        Payload structure:
        [31:16] : QUAT_C -- Third quaternion component. Stored as a 16-bit signed integer. To get the actual value, divide by 29789.09091.
        [15:0] : QUAT_D -- Fourth quaternion component. Stored as a 16-bit signed integer. To get the actual value, divide by 29789.09091.
        :return: QUAT_C as int16_t; QUAT_D as int16_t;
        """
        addr = 0x7F
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_QUAT_CD')
            reg.raw_value, = struct.unpack('>I', payload[0:4])
            quat_c, quat_d = struct.unpack('>hh', payload[0:4])
            return reg, quat_c, quat_d
    @property
    def dreg_quat_time(self):
        """
        Contains the time that the quaternion attitude was estimated.
        Payload structure:
        [31:0] : QUAT_TIME -- Quaternion time (32-bit IEEE Floating Point Value)
        :return: QUAT_TIME as float;
        """
        addr = 0x80
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_QUAT_TIME')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            quat_time, = struct.unpack('>f', payload[0:4])
            return reg, quat_time,
    # --- Euler angle and angular-rate registers, addresses 0x81..0x85 ---
    # Angles/rates are fixed-point int16 (see per-register divide factors in
    # the docstrings); getters implicitly return None when the bus read fails.
    @property
    def dreg_euler_phi_theta(self):
        """
        Contains the pitch and roll angle estimates.
        Payload structure:
        [31:16] : PHI -- Roll angle. Stored as a 16-bit signed integer. To get the actual value, divide by 91.02222.
        [15:0] : THETA -- Pitch angle. Stored as a 16-bit signed integer. To get the actual value, divide by 91.02222.
        :return: PHI as int16_t; THETA as int16_t;
        """
        addr = 0x81
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_EULER_PHI_THETA')
            # returned values are the raw fixed-point integers (not scaled)
            reg.raw_value, = struct.unpack('>I', payload[0:4])
            phi, theta = struct.unpack('>hh', payload[0:4])
            return reg, phi, theta
    @property
    def dreg_euler_psi(self):
        """
        Contains the yaw angle estimate.
        Payload structure:
        [31:16] : PSI -- Yaw angle. Stored as a 16-bit signed integer. To get the actual value, divide by 91.02222.
        :return: PSI as int16_t;
        """
        addr = 0x82
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_EULER_PSI')
            # '>hxx': int16 in the two high bytes, two pad bytes skipped
            reg.raw_value, = struct.unpack('>hxx', payload[0:4])
            psi, = struct.unpack('>hxx', payload[0:4])
            return reg, psi,
    @property
    def dreg_euler_phi_theta_dot(self):
        """
        Contains the pitch and roll rate estimates.
        Payload structure:
        [31:16] : PHI_DOT -- Roll rate. Stored as a 16-bit signed integer. To get the actual value, divide by 16.0.
        [15:0] : THETA_DOT -- Pitch rate. Stored as a 16-bit signed integer. To get the actual value, divide by 16.0.
        :return: PHI_DOT as int16_t; THETA_DOT as int16_t;
        """
        addr = 0x83
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_EULER_PHI_THETA_DOT')
            reg.raw_value, = struct.unpack('>I', payload[0:4])
            phi_dot, theta_dot = struct.unpack('>hh', payload[0:4])
            return reg, phi_dot, theta_dot
    @property
    def dreg_euler_psi_dot(self):
        """
        Contains the yaw rate estimate.
        Payload structure:
        [31:16] : PSI_DOT -- Yaw rate. Stored as a 16-bit signed integer. To get the actual value, divide by 16.0.
        :return: PSI_DOT as int16_t;
        """
        addr = 0x84
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_EULER_PSI_DOT')
            reg.raw_value, = struct.unpack('>hxx', payload[0:4])
            psi_dot, = struct.unpack('>hxx', payload[0:4])
            return reg, psi_dot,
    @property
    def dreg_euler_time(self):
        """
        Contains the time that the Euler Angles were estimated.
        Payload structure:
        [31:0] : EULER_TIME -- Euler time (32-bit IEEE Floating Point Value)
        :return: EULER_TIME as float;
        """
        addr = 0x85
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_EULER_TIME')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            euler_time, = struct.unpack('>f', payload[0:4])
            return reg, euler_time,
    # --- Position registers (NEU frame, relative to CREG_HOME_*), 0x86..0x89 ---
    # One 32-bit IEEE-754 float per register; getters implicitly return None
    # when the bus read fails.
    @property
    def dreg_position_north(self):
        """
        Contains the measured north position in meters from the latitude specified in CREG_HOME_NORTH.
        Payload structure:
        [31:0] : POSITION_NORTH -- North Position (32-bit IEEE Floating Point Value)
        :return: POSITION_NORTH as float;
        """
        addr = 0x86
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_POSITION_NORTH')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            position_north, = struct.unpack('>f', payload[0:4])
            return reg, position_north,
    @property
    def dreg_position_east(self):
        """
        Contains the measured east position in meters from the longitude specified in CREG_HOME_EAST.
        Payload structure:
        [31:0] : POSITION_EAST -- East Position (32-bit IEEE Floating Point Value)
        :return: POSITION_EAST as float;
        """
        addr = 0x87
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_POSITION_EAST')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            position_east, = struct.unpack('>f', payload[0:4])
            return reg, position_east,
    @property
    def dreg_position_up(self):
        """
        Contains the measured altitude in meters from the altitude specified in CREG_HOME_UP.
        Payload structure:
        [31:0] : POSITION_UP -- Altitude (32-bit IEEE Floating Point Value)
        :return: POSITION_UP as float;
        """
        addr = 0x88
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_POSITION_UP')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            position_up, = struct.unpack('>f', payload[0:4])
            return reg, position_up,
    @property
    def dreg_position_time(self):
        """
        Contains the time at which the position was acquired.
        Payload structure:
        [31:0] : POSITION_TIME -- Position Time (32-bit IEEE Floating Point Value)
        :return: POSITION_TIME as float;
        """
        addr = 0x89
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_POSITION_TIME')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            position_time, = struct.unpack('>f', payload[0:4])
            return reg, position_time,
    # --- Velocity registers (NEU frame, m/s), addresses 0x8A..0x8D ---
    # One 32-bit IEEE-754 float per register; getters implicitly return None
    # when the bus read fails.
    @property
    def dreg_velocity_north(self):
        """
        Contains the measured north velocity in m/s.
        Payload structure:
        [31:0] : VELOCITY_NORTH -- North Velocity (32-bit IEEE Floating Point Value)
        :return: VELOCITY_NORTH as float;
        """
        addr = 0x8A
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_NORTH')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            velocity_north, = struct.unpack('>f', payload[0:4])
            return reg, velocity_north,
    @property
    def dreg_velocity_east(self):
        """
        Contains the measured east velocity in m/s.
        Payload structure:
        [31:0] : VELOCITY_EAST -- East Velocity (32-bit IEEE Floating Point Value)
        :return: VELOCITY_EAST as float;
        """
        addr = 0x8B
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_EAST')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            velocity_east, = struct.unpack('>f', payload[0:4])
            return reg, velocity_east,
    @property
    def dreg_velocity_up(self):
        """
        Contains the measured altitude velocity in m/s.
        Payload structure:
        [31:0] : VELOCITY_UP -- Altitude Velocity (32-bit IEEE Floating Point Value)
        :return: VELOCITY_UP as float;
        """
        addr = 0x8C
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_UP')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            velocity_up, = struct.unpack('>f', payload[0:4])
            return reg, velocity_up,
    @property
    def dreg_velocity_time(self):
        """
        Contains the time at which the velocity was measured.
        Payload structure:
        [31:0] : VELOCITY_TIME -- Velocity time (32-bit IEEE Floating Point Value)
        :return: VELOCITY_TIME as float;
        """
        addr = 0x8D
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_TIME')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            velocity_time, = struct.unpack('>f', payload[0:4])
            return reg, velocity_time,
    # --- Estimated gyro bias registers (degrees/s), addresses 0x8E..0x93 ---
    # One 32-bit IEEE-754 float per register; getters implicitly return None
    # when the bus read fails.
    @property
    def dreg_gyro_1_bias_x(self):
        """
        Contains the estimated x-axis bias for the gyro 1 in degrees/s.
        Payload structure:
        [31:0] : GYRO_1_BIAS_X -- Gyro 1 bias X (32-bit IEEE Floating Point Value)
        :return: GYRO_1_BIAS_X as float;
        """
        addr = 0x8E
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_BIAS_X')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            gyro_1_bias_x, = struct.unpack('>f', payload[0:4])
            return reg, gyro_1_bias_x,
    @property
    def dreg_gyro_1_bias_y(self):
        """
        Contains the estimated y-axis bias for the gyro 1 in degrees/s.
        Payload structure:
        [31:0] : GYRO_1_BIAS_Y -- Gyro 1 bias Y (32-bit IEEE Floating Point Value)
        :return: GYRO_1_BIAS_Y as float;
        """
        addr = 0x8F
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_BIAS_Y')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            gyro_1_bias_y, = struct.unpack('>f', payload[0:4])
            return reg, gyro_1_bias_y,
    @property
    def dreg_gyro_1_bias_z(self):
        """
        Contains the estimated z-axis bias for the gyro 1 in degrees/s.
        Payload structure:
        [31:0] : GYRO_1_BIAS_Z -- Gyro 1 bias Z (32-bit IEEE Floating Point Value)
        :return: GYRO_1_BIAS_Z as float;
        """
        addr = 0x90
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_BIAS_Z')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            gyro_1_bias_z, = struct.unpack('>f', payload[0:4])
            return reg, gyro_1_bias_z,
    @property
    def dreg_gyro_2_bias_x(self):
        """
        Contains the estimated x-axis bias for the gyro 2 in degrees/s.
        Payload structure:
        [31:0] : GYRO_2_BIAS_X -- Gyro 2 bias X (32-bit IEEE Floating Point Value)
        :return: GYRO_2_BIAS_X as float;
        """
        addr = 0x91
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_BIAS_X')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            gyro_2_bias_x, = struct.unpack('>f', payload[0:4])
            return reg, gyro_2_bias_x,
    @property
    def dreg_gyro_2_bias_y(self):
        """
        Contains the estimated y-axis bias for the gyro 2 in degrees/s.
        Payload structure:
        [31:0] : GYRO_2_BIAS_Y -- Gyro 2 bias Y (32-bit IEEE Floating Point Value)
        :return: GYRO_2_BIAS_Y as float;
        """
        addr = 0x92
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_BIAS_Y')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            gyro_2_bias_y, = struct.unpack('>f', payload[0:4])
            return reg, gyro_2_bias_y,
    @property
    def dreg_gyro_2_bias_z(self):
        """
        Contains the estimated z-axis bias for the gyro 2 in degrees/s.
        Payload structure:
        [31:0] : GYRO_2_BIAS_Z -- Gyro 2 bias Z (32-bit IEEE Floating Point Value)
        :return: GYRO_2_BIAS_Z as float;
        """
        addr = 0x93
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_BIAS_Z')
            reg.raw_value, = struct.unpack('>f', payload[0:4])
            gyro_2_bias_z, = struct.unpack('>f', payload[0:4])
            return reg, gyro_2_bias_z,
    # --- Firmware identification registers, addresses 0xAA..0xAB ---
    @property
    def get_fw_build_id(self):
        """
        Firmware build identification string: a four byte ASCII character sequence which corresponds to a firmware
        series.
        Payload structure:
        [31:0] : FW_BUILD_ID -- Firmware Build ID string
        :return: FW_BUILD_ID as string;
        """
        addr = 0xAA
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='GET_FW_BUILD_ID')
            reg.raw_value, = struct.unpack('>I', payload[0:4])
            # NOTE(review): unlike every other getter in this file, this one
            # returns only the decoded string, not (reg, value) -- confirm
            # the asymmetry is intended before relying on it.
            fw_build_id = struct.unpack('>4s', payload[0:4])[0].decode('utf-8')
            return fw_build_id
    @property
    def get_fw_build_version(self):
        """
        Firmware build version provides the unique identifier of the firmware programmed in the board. A response is
        four bytes long and identifies major and minor build version, and the build number.
        Payload structure:
        [31:24] : VERSION_MAJOR -- 8-bit unsigned integer major version number
        [23:16] : VERSION_MINOR -- 8-bit unsigned integer minor version number
        [15:0] : BUILD_ID -- 16-bit unsigned integer build ID number
        :return: VERSION_MAJOR as uint8_t; VERSION_MINOR as uint8_t; BUILD_ID as uint16_t;
        """
        addr = 0xAB
        ok, payload = self.read_register(addr)
        if ok:
            reg = self.svd_parser.find_register_by(name='GET_FW_BUILD_VERSION')
            reg.raw_value, = struct.unpack('>I', payload[0:4])
            # '>BBH': major byte, minor byte, 16-bit build number (big-endian)
            version_major, version_minor, build_id = struct.unpack('>BBH', payload[0:4])
            return reg, version_major, version_minor, build_id
@property
def flash_commit(self):
raise RuntimeError('flash_commit has no getter! The register flash_commit is write-only!')
@flash_commit.setter
def flash_commit(self, new_value):
addr = 0xAC
self.write_register(addr, new_value)
@property
def reset_to_factory(self):
raise RuntimeError('reset_to_factory has no getter! The register reset_to_factory is write-only!')
@reset_to_factory.setter
def reset_to_factory(self, new_value):
addr = 0xAD
self.write_register(addr, new_value)
@property
def zero_gyros(self):
raise RuntimeError('zero_gyros has no getter! The register zero_gyros is write-only!')
@zero_gyros.setter
def zero_gyros(self, new_value):
addr = 0xAE
self.write_register(addr, new_value)
@property
def set_home_position(self):
raise RuntimeError('set_home_position has no getter! The register set_home_position is write-only!')
@set_home_position.setter
def set_home_position(self, new_value):
addr = 0xB0
self.write_register(addr, new_value)
@property
def set_mag_reference(self):
raise RuntimeError('set_mag_reference has no getter! The register set_mag_reference is write-only!')
@set_mag_reference.setter
def set_mag_reference(self, new_value):
addr = 0xB1
self.write_register(addr, new_value)
@property
def calibrate_accelerometers(self):
raise RuntimeError('calibrate_accelerometers has no getter! The register calibrate_accelerometers is write-only!')
@calibrate_accelerometers.setter
def calibrate_accelerometers(self, new_value):
addr = 0xB2
self.write_register(addr, new_value)
@property
def reset_fusion(self):
raise RuntimeError('reset_fusion has no getter! The register reset_fusion is write-only!')
@reset_fusion.setter
def reset_fusion(self, new_value):
addr = 0xB3
self.write_register(addr, new_value)
@property
def enable_zupt(self):
raise RuntimeError('enable_zupt has no getter! The register enable_zupt is write-only!')
@enable_zupt.setter
def enable_zupt(self, new_value):
addr = 0xB4
self.write_register(addr, new_value)
@property
def euler_mode(self):
raise RuntimeError('euler_mode has no getter! The register euler_mode is write-only!')
@euler_mode.setter
def euler_mode(self, new_value):
addr = 0xB5
self.write_register(addr, new_value)
@property
def quaternion_mode(self):
raise RuntimeError('quaternion_mode has no getter! The register quaternion_mode is write-only!')
@quaternion_mode.setter
def quaternion_mode(self, new_value):
addr = 0xB6
self.write_register(addr, new_value)
@property
def enable_rt_calibration(self):
raise RuntimeError('enable_rt_calibration has no getter! The register enable_rt_calibration is write-only!')
@enable_rt_calibration.setter
def enable_rt_calibration(self, new_value):
addr = 0xB7
self.write_register(addr, new_value)
@property
def en_mag_anomaly_detection(self):
raise RuntimeError('en_mag_anomaly_detection has no getter! The register en_mag_anomaly_detection is write-only!')
@en_mag_anomaly_detection.setter
def en_mag_anomaly_detection(self, new_value):
addr = 0xB8
self.write_register(addr, new_value)
@property
def run_self_tests(self):
raise RuntimeError('run_self_tests has no getter! The register run_self_tests is write-only!')
@run_self_tests.setter
def run_self_tests(self, new_value):
addr = 0xB9
self.write_register(addr, new_value)
@property
def enable_external_event(self):
raise RuntimeError('enable_external_event has no getter! The register enable_external_event is write-only!')
@enable_external_event.setter
def enable_external_event(self, new_value):
addr = 0xBA
self.write_register(addr, new_value)
@property
def enable_gnns_fusion(self):
raise RuntimeError('enable_gnns_fusion has no getter! The register enable_gnns_fusion is write-only!')
@enable_gnns_fusion.setter
def enable_gnns_fusion(self, new_value):
addr = 0xBB
self.write_register(addr, new_value)
@property
def enable_usr_euler_output(self):
raise RuntimeError('enable_usr_euler_output has no getter! The register enable_usr_euler_output is write-only!')
@enable_usr_euler_output.setter
def enable_usr_euler_output(self, new_value):
addr = 0xBC
self.write_register(addr, new_value)
@property
def enable_dead_reckoning(self):
raise RuntimeError('enable_dead_reckoning has no getter! The register enable_dead_reckoning is write-only!')
@enable_dead_reckoning.setter
def enable_dead_reckoning(self, new_value):
addr = 0xBD
self.write_register(addr, new_value)
@property
def enable_heave_sway_surge(self):
raise RuntimeError('enable_heave_sway_surge has no getter! The register enable_heave_sway_surge is write-only!')
@enable_heave_sway_surge.setter
def enable_heave_sway_surge(self, new_value):
addr = 0xBE
self.write_register(addr, new_value)
@property
def enable_ukf(self):
raise RuntimeError('enable_ukf has no getter! The register enable_ukf is write-only!')
@enable_ukf.setter
def enable_ukf(self, new_value):
addr = 0xBF
self.write_register(addr, new_value)
@property
def board_unique_id_1(self):
"""
First 32-bits of the 64-bits of the board unique identifier. Bits of the unique identifier cannot be modified
by the user.
Payload structure:
[31:0] : BOARD_UNIQUE_ID_1_BITS -- Board unique ID bits
:return: BOARD_UNIQUE_ID_1_BITS as uint32_t;
"""
addr = 0xFD
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='BOARD_UNIQUE_ID_1')
reg.raw_value, = struct.unpack('>I', payload[0:4])
board_unique_id_1_bits, = struct.unpack('>I', payload[0:4])
return reg, board_unique_id_1_bits,
@property
def board_unique_id_2(self):
"""
Last 32-bits of the 64-bits of the board unique identifier. Bits of the unique identifier cannot be modified
by the user.
Payload structure:
[31:0] : BOARD_UNIQUE_ID_2_BITS -- Board unique ID bits
:return: BOARD_UNIQUE_ID_2_BITS as uint32_t;
"""
addr = 0xFE
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='BOARD_UNIQUE_ID_2')
reg.raw_value, = struct.unpack('>I', payload[0:4])
board_unique_id_2_bits, = struct.unpack('>I', payload[0:4])
return reg, board_unique_id_2_bits,
@property
def protocol_version(self):
"""
String version of the protocol.
Payload structure:
[31:0] : PROTOCOL_VERSION_STR -- Protocol version string
:return: PROTOCOL_VERSION_STR as string;
"""
addr = 0xFF
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='PROTOCOL_VERSION')
reg.raw_value, = struct.unpack('>I', payload[0:4])
protocol_version_str = struct.unpack('>4s', payload[0:4])[0].decode('utf-8')
return protocol_version_str
@property
def hidden_gyro_1_variance(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_VARIANCE as float;
"""
addr = 0x00
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_variance,
@hidden_gyro_1_variance.setter
def hidden_gyro_1_variance(self, new_value):
addr = 0x00
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_variance(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_VARIANCE as float;
"""
addr = 0x01
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_variance,
@hidden_gyro_2_variance.setter
def hidden_gyro_2_variance(self, new_value):
addr = 0x01
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_variance(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_VARIANCE as float;
"""
addr = 0x02
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_variance,
@hidden_accel_1_variance.setter
def hidden_accel_1_variance(self, new_value):
addr = 0x02
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_variance(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_VARIANCE as float;
"""
addr = 0x03
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_variance,
@hidden_mag_1_variance.setter
def hidden_mag_1_variance(self, new_value):
addr = 0x03
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_variance(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_VARIANCE as float;
"""
addr = 0x04
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_variance,
@hidden_mag_2_variance.setter
def hidden_mag_2_variance(self, new_value):
addr = 0x04
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gps_course_variance(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GPS_COURSE_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GPS_COURSE_VARIANCE as float;
"""
addr = 0x05
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GPS_COURSE_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gps_course_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_gps_course_variance,
@hidden_gps_course_variance.setter
def hidden_gps_course_variance(self, new_value):
addr = 0x05
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gps_position_variance(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GPS_POSITION_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GPS_POSITION_VARIANCE as float;
"""
addr = 0x06
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GPS_POSITION_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gps_position_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_gps_position_variance,
@hidden_gps_position_variance.setter
def hidden_gps_position_variance(self, new_value):
addr = 0x06
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gps_velocity_variance(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GPS_VELOCITY_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GPS_VELOCITY_VARIANCE as float;
"""
addr = 0x07
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GPS_VELOCITY_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gps_velocity_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_gps_velocity_variance,
@hidden_gps_velocity_variance.setter
def hidden_gps_velocity_variance(self, new_value):
addr = 0x07
self.write_register(addr, new_value, hidden=True)
@property
def hidden_static_press_variance(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_STATIC_PRESS_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_STATIC_PRESS_VARIANCE as float;
"""
addr = 0x08
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_STATIC_PRESS_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_static_press_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_static_press_variance,
@hidden_static_press_variance.setter
def hidden_static_press_variance(self, new_value):
addr = 0x08
self.write_register(addr, new_value, hidden=True)
@property
def hidden_diff_press_variance(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_DIFF_PRESS_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_DIFF_PRESS_VARIANCE as float;
"""
addr = 0x09
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_DIFF_PRESS_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_diff_press_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_diff_press_variance,
@hidden_diff_press_variance.setter
def hidden_diff_press_variance(self, new_value):
addr = 0x09
self.write_register(addr, new_value, hidden=True)
@property
def hidden_q_uvw(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_Q_UVW -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_Q_UVW as float;
"""
addr = 0x0A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_Q_UVW')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_q_uvw, = struct.unpack('>f', payload[0:4])
return reg, hidden_q_uvw,
@hidden_q_uvw.setter
def hidden_q_uvw(self, new_value):
addr = 0x0A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_q_quaternion(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_Q_QUATERNION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_Q_QUATERNION as float;
"""
addr = 0x0B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_Q_QUATERNION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_q_quaternion, = struct.unpack('>f', payload[0:4])
return reg, hidden_q_quaternion,
@hidden_q_quaternion.setter
def hidden_q_quaternion(self, new_value):
addr = 0x0B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_q_gps_position(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_Q_GPS_POSITION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_Q_GPS_POSITION as float;
"""
addr = 0x0C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_Q_GPS_POSITION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_q_gps_position, = struct.unpack('>f', payload[0:4])
return reg, hidden_q_gps_position,
@hidden_q_gps_position.setter
def hidden_q_gps_position(self, new_value):
addr = 0x0C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_q_bias(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_Q_BIAS -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_Q_BIAS as float;
"""
addr = 0x0D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_Q_BIAS')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_q_bias, = struct.unpack('>f', payload[0:4])
return reg, hidden_q_bias,
@hidden_q_bias.setter
def hidden_q_bias(self, new_value):
addr = 0x0D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_q_euler_angles(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_Q_EULER_ANGLES -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_Q_EULER_ANGLES as float;
"""
addr = 0x0E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_Q_EULER_ANGLES')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_q_euler_angles, = struct.unpack('>f', payload[0:4])
return reg, hidden_q_euler_angles,
@hidden_q_euler_angles.setter
def hidden_q_euler_angles(self, new_value):
addr = 0x0E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_low_vg_accel_noise_factor(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_LOW_VG_ACCEL_NOISE_FACTOR -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LOW_VG_ACCEL_NOISE_FACTOR as float;
"""
addr = 0x0F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LOW_VG_ACCEL_NOISE_FACTOR')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_low_vg_accel_noise_factor, = struct.unpack('>f', payload[0:4])
return reg, hidden_low_vg_accel_noise_factor,
@hidden_low_vg_accel_noise_factor.setter
def hidden_low_vg_accel_noise_factor(self, new_value):
addr = 0x0F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_groundspeed(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_LPF_TAU_GROUNDSPEED -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LPF_TAU_GROUNDSPEED as float;
"""
addr = 0x10
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_GROUNDSPEED')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_groundspeed, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_groundspeed,
@hidden_lpf_tau_groundspeed.setter
def hidden_lpf_tau_groundspeed(self, new_value):
addr = 0x10
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_gyro_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_LPF_TAU_GYRO_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LPF_TAU_GYRO_1 as float;
"""
addr = 0x11
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_GYRO_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_gyro_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_gyro_1,
@hidden_lpf_tau_gyro_1.setter
def hidden_lpf_tau_gyro_1(self, new_value):
addr = 0x11
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_gyro_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_LPF_TAU_GYRO_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LPF_TAU_GYRO_2 as float;
"""
addr = 0x12
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_GYRO_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_gyro_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_gyro_2,
@hidden_lpf_tau_gyro_2.setter
def hidden_lpf_tau_gyro_2(self, new_value):
addr = 0x12
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_accel_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_LPF_TAU_ACCEL_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LPF_TAU_ACCEL_1 as float;
"""
addr = 0x13
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_ACCEL_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_accel_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_accel_1,
@hidden_lpf_tau_accel_1.setter
def hidden_lpf_tau_accel_1(self, new_value):
addr = 0x13
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_mag_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_LPF_TAU_MAG_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LPF_TAU_MAG_1 as float;
"""
addr = 0x14
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_MAG_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_mag_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_mag_1,
@hidden_lpf_tau_mag_1.setter
def hidden_lpf_tau_mag_1(self, new_value):
addr = 0x14
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_mag_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_LPF_TAU_MAG_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LPF_TAU_MAG_2 as float;
"""
addr = 0x15
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_MAG_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_mag_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_mag_2,
@hidden_lpf_tau_mag_2.setter
def hidden_lpf_tau_mag_2(self, new_value):
addr = 0x15
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_X_POW_0 as float;
"""
addr = 0x16
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_x_pow_0,
@hidden_c_gyro_1_bias_x_pow_0.setter
def hidden_c_gyro_1_bias_x_pow_0(self, new_value):
addr = 0x16
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_X_POW_1 as float;
"""
addr = 0x17
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_x_pow_1,
@hidden_c_gyro_1_bias_x_pow_1.setter
def hidden_c_gyro_1_bias_x_pow_1(self, new_value):
addr = 0x17
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_X_POW_2 as float;
"""
addr = 0x18
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_x_pow_2,
@hidden_c_gyro_1_bias_x_pow_2.setter
def hidden_c_gyro_1_bias_x_pow_2(self, new_value):
addr = 0x18
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_X_POW_3 as float;
"""
addr = 0x19
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_x_pow_3,
@hidden_c_gyro_1_bias_x_pow_3.setter
def hidden_c_gyro_1_bias_x_pow_3(self, new_value):
addr = 0x19
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Y_POW_0 as float;
"""
addr = 0x1A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_y_pow_0,
@hidden_c_gyro_1_bias_y_pow_0.setter
def hidden_c_gyro_1_bias_y_pow_0(self, new_value):
addr = 0x1A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Y_POW_1 as float;
"""
addr = 0x1B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_y_pow_1,
@hidden_c_gyro_1_bias_y_pow_1.setter
def hidden_c_gyro_1_bias_y_pow_1(self, new_value):
addr = 0x1B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Y_POW_2 as float;
"""
addr = 0x1C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_y_pow_2,
@hidden_c_gyro_1_bias_y_pow_2.setter
def hidden_c_gyro_1_bias_y_pow_2(self, new_value):
addr = 0x1C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Y_POW_3 as float;
"""
addr = 0x1D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_y_pow_3,
@hidden_c_gyro_1_bias_y_pow_3.setter
def hidden_c_gyro_1_bias_y_pow_3(self, new_value):
addr = 0x1D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Z_POW_0 as float;
"""
addr = 0x1E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_z_pow_0,
@hidden_c_gyro_1_bias_z_pow_0.setter
def hidden_c_gyro_1_bias_z_pow_0(self, new_value):
addr = 0x1E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Z_POW_1 as float;
"""
addr = 0x1F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_z_pow_1,
@hidden_c_gyro_1_bias_z_pow_1.setter
def hidden_c_gyro_1_bias_z_pow_1(self, new_value):
addr = 0x1F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Z_POW_2 as float;
"""
addr = 0x20
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_z_pow_2,
@hidden_c_gyro_1_bias_z_pow_2.setter
def hidden_c_gyro_1_bias_z_pow_2(self, new_value):
addr = 0x20
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Z_POW_3 as float;
"""
addr = 0x21
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_z_pow_3,
@hidden_c_gyro_1_bias_z_pow_3.setter
def hidden_c_gyro_1_bias_z_pow_3(self, new_value):
addr = 0x21
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_X_POW_0 as float;
"""
addr = 0x22
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_x_pow_0,
@hidden_c_gyro_1_scale_x_pow_0.setter
def hidden_c_gyro_1_scale_x_pow_0(self, new_value):
addr = 0x22
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_X_POW_1 as float;
"""
addr = 0x23
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_x_pow_1,
@hidden_c_gyro_1_scale_x_pow_1.setter
def hidden_c_gyro_1_scale_x_pow_1(self, new_value):
addr = 0x23
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_X_POW_2 as float;
"""
addr = 0x24
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_x_pow_2,
@hidden_c_gyro_1_scale_x_pow_2.setter
def hidden_c_gyro_1_scale_x_pow_2(self, new_value):
addr = 0x24
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_X_POW_3 as float;
"""
addr = 0x25
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_x_pow_3,
@hidden_c_gyro_1_scale_x_pow_3.setter
def hidden_c_gyro_1_scale_x_pow_3(self, new_value):
addr = 0x25
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Y_POW_0 as float;
"""
addr = 0x26
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_y_pow_0,
@hidden_c_gyro_1_scale_y_pow_0.setter
def hidden_c_gyro_1_scale_y_pow_0(self, new_value):
addr = 0x26
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Y_POW_1 as float;
"""
addr = 0x27
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_y_pow_1,
@hidden_c_gyro_1_scale_y_pow_1.setter
def hidden_c_gyro_1_scale_y_pow_1(self, new_value):
addr = 0x27
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Y_POW_2 as float;
"""
addr = 0x28
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_y_pow_2,
@hidden_c_gyro_1_scale_y_pow_2.setter
def hidden_c_gyro_1_scale_y_pow_2(self, new_value):
addr = 0x28
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Y_POW_3 as float;
"""
addr = 0x29
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_y_pow_3,
@hidden_c_gyro_1_scale_y_pow_3.setter
def hidden_c_gyro_1_scale_y_pow_3(self, new_value):
addr = 0x29
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Z_POW_0 as float;
"""
addr = 0x2A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_z_pow_0,
@hidden_c_gyro_1_scale_z_pow_0.setter
def hidden_c_gyro_1_scale_z_pow_0(self, new_value):
addr = 0x2A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Z_POW_1 as float;
"""
addr = 0x2B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_z_pow_1,
@hidden_c_gyro_1_scale_z_pow_1.setter
def hidden_c_gyro_1_scale_z_pow_1(self, new_value):
addr = 0x2B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Z_POW_2 as float;
"""
addr = 0x2C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_z_pow_2,
@hidden_c_gyro_1_scale_z_pow_2.setter
def hidden_c_gyro_1_scale_z_pow_2(self, new_value):
addr = 0x2C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Z_POW_3 as float;
"""
addr = 0x2D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_z_pow_3,
@hidden_c_gyro_1_scale_z_pow_3.setter
def hidden_c_gyro_1_scale_z_pow_3(self, new_value):
addr = 0x2D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment1_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT1_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT1_1 as float;
"""
addr = 0x2E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment1_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment1_1,
@hidden_gyro_1_alignment1_1.setter
def hidden_gyro_1_alignment1_1(self, new_value):
addr = 0x2E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment1_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT1_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT1_2 as float;
"""
addr = 0x2F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment1_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment1_2,
@hidden_gyro_1_alignment1_2.setter
def hidden_gyro_1_alignment1_2(self, new_value):
addr = 0x2F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment1_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT1_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT1_3 as float;
"""
addr = 0x30
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment1_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment1_3,
@hidden_gyro_1_alignment1_3.setter
def hidden_gyro_1_alignment1_3(self, new_value):
addr = 0x30
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment2_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT2_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT2_1 as float;
"""
addr = 0x31
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment2_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment2_1,
@hidden_gyro_1_alignment2_1.setter
def hidden_gyro_1_alignment2_1(self, new_value):
addr = 0x31
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment2_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT2_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT2_2 as float;
"""
addr = 0x32
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment2_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment2_2,
@hidden_gyro_1_alignment2_2.setter
def hidden_gyro_1_alignment2_2(self, new_value):
addr = 0x32
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment2_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT2_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT2_3 as float;
"""
addr = 0x33
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment2_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment2_3,
@hidden_gyro_1_alignment2_3.setter
def hidden_gyro_1_alignment2_3(self, new_value):
addr = 0x33
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment3_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT3_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT3_1 as float;
"""
addr = 0x34
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment3_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment3_1,
@hidden_gyro_1_alignment3_1.setter
def hidden_gyro_1_alignment3_1(self, new_value):
addr = 0x34
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment3_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT3_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT3_2 as float;
"""
addr = 0x35
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment3_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment3_2,
@hidden_gyro_1_alignment3_2.setter
def hidden_gyro_1_alignment3_2(self, new_value):
addr = 0x35
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment3_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT3_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT3_3 as float;
"""
addr = 0x36
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment3_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment3_3,
@hidden_gyro_1_alignment3_3.setter
def hidden_gyro_1_alignment3_3(self, new_value):
addr = 0x36
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_X_POW_0 as float;
"""
addr = 0x37
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_x_pow_0,
@hidden_c_gyro_2_bias_x_pow_0.setter
def hidden_c_gyro_2_bias_x_pow_0(self, new_value):
addr = 0x37
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_X_POW_1 as float;
"""
addr = 0x38
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_x_pow_1,
@hidden_c_gyro_2_bias_x_pow_1.setter
def hidden_c_gyro_2_bias_x_pow_1(self, new_value):
addr = 0x38
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_X_POW_2 as float;
"""
addr = 0x39
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_x_pow_2,
@hidden_c_gyro_2_bias_x_pow_2.setter
def hidden_c_gyro_2_bias_x_pow_2(self, new_value):
addr = 0x39
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_X_POW_3 as float;
"""
addr = 0x3A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_x_pow_3,
@hidden_c_gyro_2_bias_x_pow_3.setter
def hidden_c_gyro_2_bias_x_pow_3(self, new_value):
addr = 0x3A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Y_POW_0 as float;
"""
addr = 0x3B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_y_pow_0,
@hidden_c_gyro_2_bias_y_pow_0.setter
def hidden_c_gyro_2_bias_y_pow_0(self, new_value):
addr = 0x3B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Y_POW_1 as float;
"""
addr = 0x3C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_y_pow_1,
@hidden_c_gyro_2_bias_y_pow_1.setter
def hidden_c_gyro_2_bias_y_pow_1(self, new_value):
addr = 0x3C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Y_POW_2 as float;
"""
addr = 0x3D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_y_pow_2,
@hidden_c_gyro_2_bias_y_pow_2.setter
def hidden_c_gyro_2_bias_y_pow_2(self, new_value):
addr = 0x3D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Y_POW_3 as float;
"""
addr = 0x3E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_y_pow_3,
@hidden_c_gyro_2_bias_y_pow_3.setter
def hidden_c_gyro_2_bias_y_pow_3(self, new_value):
addr = 0x3E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Z_POW_0 as float;
"""
addr = 0x3F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_z_pow_0,
@hidden_c_gyro_2_bias_z_pow_0.setter
def hidden_c_gyro_2_bias_z_pow_0(self, new_value):
addr = 0x3F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Z_POW_1 as float;
"""
addr = 0x40
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_z_pow_1,
@hidden_c_gyro_2_bias_z_pow_1.setter
def hidden_c_gyro_2_bias_z_pow_1(self, new_value):
addr = 0x40
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Z_POW_2 as float;
"""
addr = 0x41
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_z_pow_2,
@hidden_c_gyro_2_bias_z_pow_2.setter
def hidden_c_gyro_2_bias_z_pow_2(self, new_value):
addr = 0x41
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Z_POW_3 as float;
"""
addr = 0x42
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_z_pow_3,
@hidden_c_gyro_2_bias_z_pow_3.setter
def hidden_c_gyro_2_bias_z_pow_3(self, new_value):
addr = 0x42
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_X_POW_0 as float;
"""
addr = 0x43
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_x_pow_0,
@hidden_c_gyro_2_scale_x_pow_0.setter
def hidden_c_gyro_2_scale_x_pow_0(self, new_value):
addr = 0x43
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_X_POW_1 as float;
"""
addr = 0x44
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_x_pow_1,
@hidden_c_gyro_2_scale_x_pow_1.setter
def hidden_c_gyro_2_scale_x_pow_1(self, new_value):
addr = 0x44
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_X_POW_2 as float;
"""
addr = 0x45
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_x_pow_2,
@hidden_c_gyro_2_scale_x_pow_2.setter
def hidden_c_gyro_2_scale_x_pow_2(self, new_value):
addr = 0x45
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_X_POW_3 as float;
"""
addr = 0x46
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_x_pow_3,
@hidden_c_gyro_2_scale_x_pow_3.setter
def hidden_c_gyro_2_scale_x_pow_3(self, new_value):
addr = 0x46
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Y_POW_0 as float;
"""
addr = 0x47
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_y_pow_0,
@hidden_c_gyro_2_scale_y_pow_0.setter
def hidden_c_gyro_2_scale_y_pow_0(self, new_value):
addr = 0x47
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Y_POW_1 as float;
"""
addr = 0x48
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_y_pow_1,
@hidden_c_gyro_2_scale_y_pow_1.setter
def hidden_c_gyro_2_scale_y_pow_1(self, new_value):
addr = 0x48
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Y_POW_2 as float;
"""
addr = 0x49
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_y_pow_2,
@hidden_c_gyro_2_scale_y_pow_2.setter
def hidden_c_gyro_2_scale_y_pow_2(self, new_value):
addr = 0x49
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Y_POW_3 as float;
"""
addr = 0x4A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_y_pow_3,
@hidden_c_gyro_2_scale_y_pow_3.setter
def hidden_c_gyro_2_scale_y_pow_3(self, new_value):
addr = 0x4A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Z_POW_0 as float;
"""
addr = 0x4B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_z_pow_0,
@hidden_c_gyro_2_scale_z_pow_0.setter
def hidden_c_gyro_2_scale_z_pow_0(self, new_value):
addr = 0x4B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Z_POW_1 as float;
"""
addr = 0x4C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_z_pow_1,
@hidden_c_gyro_2_scale_z_pow_1.setter
def hidden_c_gyro_2_scale_z_pow_1(self, new_value):
addr = 0x4C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Z_POW_2 as float;
"""
addr = 0x4D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_z_pow_2,
@hidden_c_gyro_2_scale_z_pow_2.setter
def hidden_c_gyro_2_scale_z_pow_2(self, new_value):
addr = 0x4D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Z_POW_3 as float;
"""
addr = 0x4E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_z_pow_3,
@hidden_c_gyro_2_scale_z_pow_3.setter
def hidden_c_gyro_2_scale_z_pow_3(self, new_value):
addr = 0x4E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment1_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT1_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT1_1 as float;
"""
addr = 0x4F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment1_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment1_1,
@hidden_gyro_2_alignment1_1.setter
def hidden_gyro_2_alignment1_1(self, new_value):
addr = 0x4F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment1_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT1_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT1_2 as float;
"""
addr = 0x50
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment1_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment1_2,
@hidden_gyro_2_alignment1_2.setter
def hidden_gyro_2_alignment1_2(self, new_value):
addr = 0x50
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment1_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT1_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT1_3 as float;
"""
addr = 0x51
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment1_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment1_3,
@hidden_gyro_2_alignment1_3.setter
def hidden_gyro_2_alignment1_3(self, new_value):
addr = 0x51
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment2_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT2_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT2_1 as float;
"""
addr = 0x52
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment2_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment2_1,
@hidden_gyro_2_alignment2_1.setter
def hidden_gyro_2_alignment2_1(self, new_value):
addr = 0x52
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment2_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT2_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT2_2 as float;
"""
addr = 0x53
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment2_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment2_2,
@hidden_gyro_2_alignment2_2.setter
def hidden_gyro_2_alignment2_2(self, new_value):
addr = 0x53
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment2_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT2_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT2_3 as float;
"""
addr = 0x54
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment2_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment2_3,
@hidden_gyro_2_alignment2_3.setter
def hidden_gyro_2_alignment2_3(self, new_value):
addr = 0x54
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment3_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT3_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT3_1 as float;
"""
addr = 0x55
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment3_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment3_1,
@hidden_gyro_2_alignment3_1.setter
def hidden_gyro_2_alignment3_1(self, new_value):
addr = 0x55
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment3_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT3_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT3_2 as float;
"""
addr = 0x56
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment3_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment3_2,
@hidden_gyro_2_alignment3_2.setter
def hidden_gyro_2_alignment3_2(self, new_value):
addr = 0x56
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment3_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT3_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT3_3 as float;
"""
addr = 0x57
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment3_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment3_3,
@hidden_gyro_2_alignment3_3.setter
def hidden_gyro_2_alignment3_3(self, new_value):
addr = 0x57
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_X_POW_0 as float;
"""
addr = 0x58
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_x_pow_0,
@hidden_c_accel_1_bias_x_pow_0.setter
def hidden_c_accel_1_bias_x_pow_0(self, new_value):
addr = 0x58
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_X_POW_1 as float;
"""
addr = 0x59
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_x_pow_1,
@hidden_c_accel_1_bias_x_pow_1.setter
def hidden_c_accel_1_bias_x_pow_1(self, new_value):
addr = 0x59
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_X_POW_2 as float;
"""
addr = 0x5A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_x_pow_2,
@hidden_c_accel_1_bias_x_pow_2.setter
def hidden_c_accel_1_bias_x_pow_2(self, new_value):
addr = 0x5A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_X_POW_3 as float;
"""
addr = 0x5B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_x_pow_3,
@hidden_c_accel_1_bias_x_pow_3.setter
def hidden_c_accel_1_bias_x_pow_3(self, new_value):
addr = 0x5B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Y_POW_0 as float;
"""
addr = 0x5C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_y_pow_0,
@hidden_c_accel_1_bias_y_pow_0.setter
def hidden_c_accel_1_bias_y_pow_0(self, new_value):
addr = 0x5C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Y_POW_1 as float;
"""
addr = 0x5D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_y_pow_1,
@hidden_c_accel_1_bias_y_pow_1.setter
def hidden_c_accel_1_bias_y_pow_1(self, new_value):
addr = 0x5D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Y_POW_2 as float;
"""
addr = 0x5E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_y_pow_2,
@hidden_c_accel_1_bias_y_pow_2.setter
def hidden_c_accel_1_bias_y_pow_2(self, new_value):
addr = 0x5E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Y_POW_3 as float;
"""
addr = 0x5F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_y_pow_3,
@hidden_c_accel_1_bias_y_pow_3.setter
def hidden_c_accel_1_bias_y_pow_3(self, new_value):
addr = 0x5F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Z_POW_0 as float;
"""
addr = 0x60
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_z_pow_0,
@hidden_c_accel_1_bias_z_pow_0.setter
def hidden_c_accel_1_bias_z_pow_0(self, new_value):
addr = 0x60
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Z_POW_1 as float;
"""
addr = 0x61
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_z_pow_1,
@hidden_c_accel_1_bias_z_pow_1.setter
def hidden_c_accel_1_bias_z_pow_1(self, new_value):
addr = 0x61
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Z_POW_2 as float;
"""
addr = 0x62
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_z_pow_2,
@hidden_c_accel_1_bias_z_pow_2.setter
def hidden_c_accel_1_bias_z_pow_2(self, new_value):
addr = 0x62
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Z_POW_3 as float;
"""
addr = 0x63
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_z_pow_3,
@hidden_c_accel_1_bias_z_pow_3.setter
def hidden_c_accel_1_bias_z_pow_3(self, new_value):
addr = 0x63
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_X_POW_0 as float;
"""
addr = 0x64
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_x_pow_0,
@hidden_c_accel_1_scale_x_pow_0.setter
def hidden_c_accel_1_scale_x_pow_0(self, new_value):
addr = 0x64
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_X_POW_1 as float;
"""
addr = 0x65
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_x_pow_1,
@hidden_c_accel_1_scale_x_pow_1.setter
def hidden_c_accel_1_scale_x_pow_1(self, new_value):
addr = 0x65
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_X_POW_2 as float;
"""
addr = 0x66
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_x_pow_2,
@hidden_c_accel_1_scale_x_pow_2.setter
def hidden_c_accel_1_scale_x_pow_2(self, new_value):
addr = 0x66
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_X_POW_3 as float;
"""
addr = 0x67
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_x_pow_3,
@hidden_c_accel_1_scale_x_pow_3.setter
def hidden_c_accel_1_scale_x_pow_3(self, new_value):
addr = 0x67
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Y_POW_0 as float;
"""
addr = 0x68
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_y_pow_0,
@hidden_c_accel_1_scale_y_pow_0.setter
def hidden_c_accel_1_scale_y_pow_0(self, new_value):
addr = 0x68
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Y_POW_1 as float;
"""
addr = 0x69
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_y_pow_1,
@hidden_c_accel_1_scale_y_pow_1.setter
def hidden_c_accel_1_scale_y_pow_1(self, new_value):
addr = 0x69
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Y_POW_2 as float;
"""
addr = 0x6A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_y_pow_2,
@hidden_c_accel_1_scale_y_pow_2.setter
def hidden_c_accel_1_scale_y_pow_2(self, new_value):
addr = 0x6A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Y_POW_3 as float;
"""
addr = 0x6B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_y_pow_3,
@hidden_c_accel_1_scale_y_pow_3.setter
def hidden_c_accel_1_scale_y_pow_3(self, new_value):
addr = 0x6B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Z_POW_0 as float;
"""
addr = 0x6C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_z_pow_0,
@hidden_c_accel_1_scale_z_pow_0.setter
def hidden_c_accel_1_scale_z_pow_0(self, new_value):
addr = 0x6C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Z_POW_1 as float;
"""
addr = 0x6D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_z_pow_1,
@hidden_c_accel_1_scale_z_pow_1.setter
def hidden_c_accel_1_scale_z_pow_1(self, new_value):
addr = 0x6D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Z_POW_2 as float;
"""
addr = 0x6E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_z_pow_2,
@hidden_c_accel_1_scale_z_pow_2.setter
def hidden_c_accel_1_scale_z_pow_2(self, new_value):
addr = 0x6E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Z_POW_3 as float;
"""
addr = 0x6F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_z_pow_3,
@hidden_c_accel_1_scale_z_pow_3.setter
def hidden_c_accel_1_scale_z_pow_3(self, new_value):
addr = 0x6F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment1_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT1_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT1_1 as float;
"""
addr = 0x70
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment1_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment1_1,
@hidden_accel_1_alignment1_1.setter
def hidden_accel_1_alignment1_1(self, new_value):
addr = 0x70
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment1_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT1_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT1_2 as float;
"""
addr = 0x71
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment1_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment1_2,
@hidden_accel_1_alignment1_2.setter
def hidden_accel_1_alignment1_2(self, new_value):
addr = 0x71
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment1_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT1_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT1_3 as float;
"""
addr = 0x72
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment1_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment1_3,
@hidden_accel_1_alignment1_3.setter
def hidden_accel_1_alignment1_3(self, new_value):
addr = 0x72
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment2_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT2_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT2_1 as float;
"""
addr = 0x73
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment2_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment2_1,
@hidden_accel_1_alignment2_1.setter
def hidden_accel_1_alignment2_1(self, new_value):
addr = 0x73
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment2_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT2_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT2_2 as float;
"""
addr = 0x74
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment2_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment2_2,
@hidden_accel_1_alignment2_2.setter
def hidden_accel_1_alignment2_2(self, new_value):
addr = 0x74
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment2_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT2_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT2_3 as float;
"""
addr = 0x75
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment2_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment2_3,
@hidden_accel_1_alignment2_3.setter
def hidden_accel_1_alignment2_3(self, new_value):
addr = 0x75
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment3_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT3_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT3_1 as float;
"""
addr = 0x76
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment3_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment3_1,
@hidden_accel_1_alignment3_1.setter
def hidden_accel_1_alignment3_1(self, new_value):
addr = 0x76
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment3_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT3_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT3_2 as float;
"""
addr = 0x77
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment3_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment3_2,
@hidden_accel_1_alignment3_2.setter
def hidden_accel_1_alignment3_2(self, new_value):
addr = 0x77
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment3_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT3_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT3_3 as float;
"""
addr = 0x78
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment3_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment3_3,
@hidden_accel_1_alignment3_3.setter
def hidden_accel_1_alignment3_3(self, new_value):
addr = 0x78
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_X_POW_0 as float;
"""
addr = 0x79
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_x_pow_0,
@hidden_c_mag_1_bias_x_pow_0.setter
def hidden_c_mag_1_bias_x_pow_0(self, new_value):
addr = 0x79
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_X_POW_1 as float;
"""
addr = 0x7A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_x_pow_1,
@hidden_c_mag_1_bias_x_pow_1.setter
def hidden_c_mag_1_bias_x_pow_1(self, new_value):
addr = 0x7A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_X_POW_2 as float;
"""
addr = 0x7B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_x_pow_2,
@hidden_c_mag_1_bias_x_pow_2.setter
def hidden_c_mag_1_bias_x_pow_2(self, new_value):
addr = 0x7B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_X_POW_3 as float;
"""
addr = 0x7C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_x_pow_3,
@hidden_c_mag_1_bias_x_pow_3.setter
def hidden_c_mag_1_bias_x_pow_3(self, new_value):
addr = 0x7C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Y_POW_0 as float;
"""
addr = 0x7D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_y_pow_0,
@hidden_c_mag_1_bias_y_pow_0.setter
def hidden_c_mag_1_bias_y_pow_0(self, new_value):
addr = 0x7D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Y_POW_1 as float;
"""
addr = 0x7E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_y_pow_1,
@hidden_c_mag_1_bias_y_pow_1.setter
def hidden_c_mag_1_bias_y_pow_1(self, new_value):
addr = 0x7E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Y_POW_2 as float;
"""
addr = 0x7F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_y_pow_2,
@hidden_c_mag_1_bias_y_pow_2.setter
def hidden_c_mag_1_bias_y_pow_2(self, new_value):
addr = 0x7F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Y_POW_3 as float;
"""
addr = 0x80
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_y_pow_3,
@hidden_c_mag_1_bias_y_pow_3.setter
def hidden_c_mag_1_bias_y_pow_3(self, new_value):
addr = 0x80
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Z_POW_0 as float;
"""
addr = 0x81
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_z_pow_0,
@hidden_c_mag_1_bias_z_pow_0.setter
def hidden_c_mag_1_bias_z_pow_0(self, new_value):
addr = 0x81
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Z_POW_1 as float;
"""
addr = 0x82
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_z_pow_1,
@hidden_c_mag_1_bias_z_pow_1.setter
def hidden_c_mag_1_bias_z_pow_1(self, new_value):
addr = 0x82
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Z_POW_2 as float;
"""
addr = 0x83
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_z_pow_2,
@hidden_c_mag_1_bias_z_pow_2.setter
def hidden_c_mag_1_bias_z_pow_2(self, new_value):
addr = 0x83
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Z_POW_3 as float;
"""
addr = 0x84
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_z_pow_3,
@hidden_c_mag_1_bias_z_pow_3.setter
def hidden_c_mag_1_bias_z_pow_3(self, new_value):
addr = 0x84
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_X_POW_0 as float;
"""
addr = 0x85
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_x_pow_0,
@hidden_c_mag_1_scale_x_pow_0.setter
def hidden_c_mag_1_scale_x_pow_0(self, new_value):
addr = 0x85
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_X_POW_1 as float;
"""
addr = 0x86
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_x_pow_1,
@hidden_c_mag_1_scale_x_pow_1.setter
def hidden_c_mag_1_scale_x_pow_1(self, new_value):
addr = 0x86
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_X_POW_2 as float;
"""
addr = 0x87
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_x_pow_2,
@hidden_c_mag_1_scale_x_pow_2.setter
def hidden_c_mag_1_scale_x_pow_2(self, new_value):
addr = 0x87
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_X_POW_3 as float;
"""
addr = 0x88
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_x_pow_3,
@hidden_c_mag_1_scale_x_pow_3.setter
def hidden_c_mag_1_scale_x_pow_3(self, new_value):
addr = 0x88
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Y_POW_0 as float;
"""
addr = 0x89
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_y_pow_0,
@hidden_c_mag_1_scale_y_pow_0.setter
def hidden_c_mag_1_scale_y_pow_0(self, new_value):
addr = 0x89
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Y_POW_1 as float;
"""
addr = 0x8A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_y_pow_1,
@hidden_c_mag_1_scale_y_pow_1.setter
def hidden_c_mag_1_scale_y_pow_1(self, new_value):
addr = 0x8A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Y_POW_2 as float;
"""
addr = 0x8B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_y_pow_2,
@hidden_c_mag_1_scale_y_pow_2.setter
def hidden_c_mag_1_scale_y_pow_2(self, new_value):
addr = 0x8B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Y_POW_3 as float;
"""
addr = 0x8C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_y_pow_3,
@hidden_c_mag_1_scale_y_pow_3.setter
def hidden_c_mag_1_scale_y_pow_3(self, new_value):
addr = 0x8C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Z_POW_0 as float;
"""
addr = 0x8D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_z_pow_0,
@hidden_c_mag_1_scale_z_pow_0.setter
def hidden_c_mag_1_scale_z_pow_0(self, new_value):
addr = 0x8D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Z_POW_1 as float;
"""
addr = 0x8E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_z_pow_1,
@hidden_c_mag_1_scale_z_pow_1.setter
def hidden_c_mag_1_scale_z_pow_1(self, new_value):
addr = 0x8E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Z_POW_2 as float;
"""
addr = 0x8F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_z_pow_2,
@hidden_c_mag_1_scale_z_pow_2.setter
def hidden_c_mag_1_scale_z_pow_2(self, new_value):
addr = 0x8F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Z_POW_3 as float;
"""
addr = 0x90
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_z_pow_3,
@hidden_c_mag_1_scale_z_pow_3.setter
def hidden_c_mag_1_scale_z_pow_3(self, new_value):
addr = 0x90
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment1_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT1_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT1_1 as float;
"""
addr = 0x91
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment1_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment1_1,
@hidden_mag_1_alignment1_1.setter
def hidden_mag_1_alignment1_1(self, new_value):
addr = 0x91
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment1_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT1_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT1_2 as float;
"""
addr = 0x92
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment1_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment1_2,
@hidden_mag_1_alignment1_2.setter
def hidden_mag_1_alignment1_2(self, new_value):
addr = 0x92
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment1_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT1_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT1_3 as float;
"""
addr = 0x93
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment1_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment1_3,
@hidden_mag_1_alignment1_3.setter
def hidden_mag_1_alignment1_3(self, new_value):
addr = 0x93
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment2_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT2_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT2_1 as float;
"""
addr = 0x94
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment2_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment2_1,
@hidden_mag_1_alignment2_1.setter
def hidden_mag_1_alignment2_1(self, new_value):
addr = 0x94
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment2_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT2_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT2_2 as float;
"""
addr = 0x95
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment2_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment2_2,
@hidden_mag_1_alignment2_2.setter
def hidden_mag_1_alignment2_2(self, new_value):
addr = 0x95
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment2_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT2_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT2_3 as float;
"""
addr = 0x96
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment2_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment2_3,
@hidden_mag_1_alignment2_3.setter
def hidden_mag_1_alignment2_3(self, new_value):
addr = 0x96
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment3_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT3_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT3_1 as float;
"""
addr = 0x97
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment3_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment3_1,
@hidden_mag_1_alignment3_1.setter
def hidden_mag_1_alignment3_1(self, new_value):
addr = 0x97
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment3_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT3_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT3_2 as float;
"""
addr = 0x98
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment3_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment3_2,
@hidden_mag_1_alignment3_2.setter
def hidden_mag_1_alignment3_2(self, new_value):
addr = 0x98
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment3_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT3_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT3_3 as float;
"""
addr = 0x99
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment3_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment3_3,
@hidden_mag_1_alignment3_3.setter
def hidden_mag_1_alignment3_3(self, new_value):
addr = 0x99
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_reference_x(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_REFERENCE_X -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_REFERENCE_X as float;
"""
addr = 0x9A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_REFERENCE_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_reference_x, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_reference_x,
@hidden_mag_1_reference_x.setter
def hidden_mag_1_reference_x(self, new_value):
addr = 0x9A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_reference_y(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_REFERENCE_Y -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_REFERENCE_Y as float;
"""
addr = 0x9B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_REFERENCE_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_reference_y, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_reference_y,
@hidden_mag_1_reference_y.setter
def hidden_mag_1_reference_y(self, new_value):
addr = 0x9B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_reference_z(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_REFERENCE_Z -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_REFERENCE_Z as float;
"""
addr = 0x9C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_REFERENCE_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_reference_z, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_reference_z,
@hidden_mag_1_reference_z.setter
def hidden_mag_1_reference_z(self, new_value):
addr = 0x9C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_X_POW_0 as float;
"""
addr = 0x9D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_x_pow_0,
@hidden_c_mag_2_bias_x_pow_0.setter
def hidden_c_mag_2_bias_x_pow_0(self, new_value):
addr = 0x9D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_X_POW_1 as float;
"""
addr = 0x9E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_x_pow_1,
@hidden_c_mag_2_bias_x_pow_1.setter
def hidden_c_mag_2_bias_x_pow_1(self, new_value):
addr = 0x9E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_X_POW_2 as float;
"""
addr = 0x9F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_x_pow_2,
@hidden_c_mag_2_bias_x_pow_2.setter
def hidden_c_mag_2_bias_x_pow_2(self, new_value):
addr = 0x9F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_X_POW_3 as float;
"""
addr = 0xA0
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_x_pow_3,
@hidden_c_mag_2_bias_x_pow_3.setter
def hidden_c_mag_2_bias_x_pow_3(self, new_value):
addr = 0xA0
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Y_POW_0 as float;
"""
addr = 0xA1
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_y_pow_0,
@hidden_c_mag_2_bias_y_pow_0.setter
def hidden_c_mag_2_bias_y_pow_0(self, new_value):
addr = 0xA1
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Y_POW_1 as float;
"""
addr = 0xA2
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_y_pow_1,
@hidden_c_mag_2_bias_y_pow_1.setter
def hidden_c_mag_2_bias_y_pow_1(self, new_value):
addr = 0xA2
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Y_POW_2 as float;
"""
addr = 0xA3
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_y_pow_2,
@hidden_c_mag_2_bias_y_pow_2.setter
def hidden_c_mag_2_bias_y_pow_2(self, new_value):
addr = 0xA3
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Y_POW_3 as float;
"""
addr = 0xA4
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_y_pow_3,
@hidden_c_mag_2_bias_y_pow_3.setter
def hidden_c_mag_2_bias_y_pow_3(self, new_value):
addr = 0xA4
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Z_POW_0 as float;
"""
addr = 0xA5
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_z_pow_0,
@hidden_c_mag_2_bias_z_pow_0.setter
def hidden_c_mag_2_bias_z_pow_0(self, new_value):
addr = 0xA5
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Z_POW_1 as float;
"""
addr = 0xA6
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_z_pow_1,
@hidden_c_mag_2_bias_z_pow_1.setter
def hidden_c_mag_2_bias_z_pow_1(self, new_value):
addr = 0xA6
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Z_POW_2 as float;
"""
addr = 0xA7
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_z_pow_2,
@hidden_c_mag_2_bias_z_pow_2.setter
def hidden_c_mag_2_bias_z_pow_2(self, new_value):
addr = 0xA7
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Z_POW_3 as float;
"""
addr = 0xA8
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_z_pow_3,
@hidden_c_mag_2_bias_z_pow_3.setter
def hidden_c_mag_2_bias_z_pow_3(self, new_value):
addr = 0xA8
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_X_POW_0 as float;
"""
addr = 0xA9
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_x_pow_0,
@hidden_c_mag_2_scale_x_pow_0.setter
def hidden_c_mag_2_scale_x_pow_0(self, new_value):
addr = 0xA9
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_X_POW_1 as float;
"""
addr = 0xAA
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_x_pow_1,
@hidden_c_mag_2_scale_x_pow_1.setter
def hidden_c_mag_2_scale_x_pow_1(self, new_value):
addr = 0xAA
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_X_POW_2 as float;
"""
addr = 0xAB
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_x_pow_2,
@hidden_c_mag_2_scale_x_pow_2.setter
def hidden_c_mag_2_scale_x_pow_2(self, new_value):
addr = 0xAB
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_X_POW_3 as float;
"""
addr = 0xAC
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_x_pow_3,
@hidden_c_mag_2_scale_x_pow_3.setter
def hidden_c_mag_2_scale_x_pow_3(self, new_value):
addr = 0xAC
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Y_POW_0 as float;
"""
addr = 0xAD
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_y_pow_0,
@hidden_c_mag_2_scale_y_pow_0.setter
def hidden_c_mag_2_scale_y_pow_0(self, new_value):
addr = 0xAD
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Y_POW_1 as float;
"""
addr = 0xAE
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_y_pow_1,
@hidden_c_mag_2_scale_y_pow_1.setter
def hidden_c_mag_2_scale_y_pow_1(self, new_value):
addr = 0xAE
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Y_POW_2 as float;
"""
addr = 0xAF
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_y_pow_2,
@hidden_c_mag_2_scale_y_pow_2.setter
def hidden_c_mag_2_scale_y_pow_2(self, new_value):
addr = 0xAF
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Y_POW_3 as float;
"""
addr = 0xB0
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_y_pow_3,
@hidden_c_mag_2_scale_y_pow_3.setter
def hidden_c_mag_2_scale_y_pow_3(self, new_value):
addr = 0xB0
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Z_POW_0 as float;
"""
addr = 0xB1
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_z_pow_0,
@hidden_c_mag_2_scale_z_pow_0.setter
def hidden_c_mag_2_scale_z_pow_0(self, new_value):
addr = 0xB1
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Z_POW_1 as float;
"""
addr = 0xB2
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_z_pow_1,
@hidden_c_mag_2_scale_z_pow_1.setter
def hidden_c_mag_2_scale_z_pow_1(self, new_value):
addr = 0xB2
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Z_POW_2 as float;
"""
addr = 0xB3
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_z_pow_2,
@hidden_c_mag_2_scale_z_pow_2.setter
def hidden_c_mag_2_scale_z_pow_2(self, new_value):
addr = 0xB3
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Z_POW_3 as float;
"""
addr = 0xB4
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_z_pow_3,
@hidden_c_mag_2_scale_z_pow_3.setter
def hidden_c_mag_2_scale_z_pow_3(self, new_value):
addr = 0xB4
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment1_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT1_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT1_1 as float;
"""
addr = 0xB5
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment1_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment1_1,
@hidden_mag_2_alignment1_1.setter
def hidden_mag_2_alignment1_1(self, new_value):
addr = 0xB5
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment1_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT1_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT1_2 as float;
"""
addr = 0xB6
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment1_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment1_2,
@hidden_mag_2_alignment1_2.setter
def hidden_mag_2_alignment1_2(self, new_value):
addr = 0xB6
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment1_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT1_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT1_3 as float;
"""
addr = 0xB7
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment1_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment1_3,
@hidden_mag_2_alignment1_3.setter
def hidden_mag_2_alignment1_3(self, new_value):
addr = 0xB7
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment2_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT2_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT2_1 as float;
"""
addr = 0xB8
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment2_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment2_1,
@hidden_mag_2_alignment2_1.setter
def hidden_mag_2_alignment2_1(self, new_value):
addr = 0xB8
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment2_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT2_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT2_2 as float;
"""
addr = 0xB9
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment2_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment2_2,
@hidden_mag_2_alignment2_2.setter
def hidden_mag_2_alignment2_2(self, new_value):
addr = 0xB9
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment2_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT2_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT2_3 as float;
"""
addr = 0xBA
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment2_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment2_3,
@hidden_mag_2_alignment2_3.setter
def hidden_mag_2_alignment2_3(self, new_value):
addr = 0xBA
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment3_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT3_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT3_1 as float;
"""
addr = 0xBB
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment3_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment3_1,
@hidden_mag_2_alignment3_1.setter
def hidden_mag_2_alignment3_1(self, new_value):
addr = 0xBB
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment3_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT3_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT3_2 as float;
"""
addr = 0xBC
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment3_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment3_2,
@hidden_mag_2_alignment3_2.setter
def hidden_mag_2_alignment3_2(self, new_value):
addr = 0xBC
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment3_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT3_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT3_3 as float;
"""
addr = 0xBD
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment3_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment3_3,
@hidden_mag_2_alignment3_3.setter
def hidden_mag_2_alignment3_3(self, new_value):
addr = 0xBD
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_reference_x(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_REFERENCE_X -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_REFERENCE_X as float;
"""
addr = 0xBE
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_REFERENCE_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_reference_x, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_reference_x,
@hidden_mag_2_reference_x.setter
def hidden_mag_2_reference_x(self, new_value):
addr = 0xBE
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_reference_y(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_REFERENCE_Y -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_REFERENCE_Y as float;
"""
addr = 0xBF
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_REFERENCE_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_reference_y, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_reference_y,
@hidden_mag_2_reference_y.setter
def hidden_mag_2_reference_y(self, new_value):
addr = 0xBF
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_reference_z(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_REFERENCE_Z -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_REFERENCE_Z as float;
"""
addr = 0xC0
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_REFERENCE_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_reference_z, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_reference_z,
@hidden_mag_2_reference_z.setter
def hidden_mag_2_reference_z(self, new_value):
addr = 0xC0
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_conversion(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_CONVERSION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_CONVERSION as float;
"""
addr = 0xC1
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_CONVERSION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_conversion, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_conversion,
@property
def hidden_gyro_2_conversion(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_CONVERSION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_CONVERSION as float;
"""
addr = 0xC2
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_CONVERSION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_conversion, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_conversion,
@property
def hidden_accel_1_conversion(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_CONVERSION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_CONVERSION as float;
"""
addr = 0xC3
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_CONVERSION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_conversion, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_conversion,
@property
def hidden_mag_1_conversion(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_CONVERSION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_CONVERSION as float;
"""
addr = 0xC4
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_CONVERSION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_conversion, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_conversion,
@property
def hidden_mag_2_conversion(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_CONVERSION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_CONVERSION as float;
"""
addr = 0xC5
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_CONVERSION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_conversion, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_conversion,
if __name__ == '__main__':
pass | /rsl_comm_py-0.1.11.tar.gz/rsl_comm_py-0.1.11/rsl_comm_py/shearwater_registers.py | 0.892574 | 0.394872 | shearwater_registers.py | pypi |
from dataclasses import dataclass
@dataclass
class UM7AllRawPacket:
    """Broadcast packet bundling raw gyro, accel, and mag samples plus temperature.

    Raw axis values are integer sensor counts; the ``*_time`` fields are
    on-board timestamps (presumably seconds -- confirm against the datasheet).
    """
    gyro_raw_x: int
    gyro_raw_y: int
    gyro_raw_z: int
    gyro_raw_time: float
    accel_raw_x: int
    accel_raw_y: int
    accel_raw_z: int
    accel_raw_time: float
    mag_raw_x: int
    mag_raw_y: int
    mag_raw_z: int
    mag_raw_time: float
    temperature: float
    temperature_time: float
    def __repr__(self):
        # Compact fixed-width one-line summary for log output.
        return f"RawPacket("\
               f"gyro=[{self.gyro_raw_x:>+5d}, {self.gyro_raw_y:>+5d}, {self.gyro_raw_z:>+5d}], "\
               f"gyro_t={self.gyro_raw_time:>6.3f}; " \
               f"accel=[{self.accel_raw_x:>+5d}, {self.accel_raw_y:>+5d}, {self.accel_raw_z:>+5d}], " \
               f"accel_t={self.accel_raw_time:>6.3f}; " \
               f"mag=[{self.mag_raw_x:>+8d}, {self.mag_raw_y:>+8d}, {self.mag_raw_z:>+8d}], " \
               f"mag_t={self.mag_raw_time:>6.3f}; " \
               f"T={self.temperature:>+3.2f}, " \
               f"T_t={self.temperature_time:>6.3f})"
@dataclass
class UM7AllProcPacket:
    """Broadcast packet bundling processed (calibrated) gyro, accel, and mag data.

    Values are floats in physical units (units not shown in this file --
    confirm against the UM7 datasheet); ``*_time`` fields are on-board timestamps.
    """
    gyro_proc_x: float
    gyro_proc_y: float
    gyro_proc_z: float
    gyro_proc_time: float
    accel_proc_x: float
    accel_proc_y: float
    accel_proc_z: float
    accel_proc_time: float
    mag_proc_x: float
    mag_proc_y: float
    mag_proc_z: float
    mag_proc_time: float
    def __repr__(self):
        # Compact fixed-width one-line summary for log output.
        return f"ProcPacket("\
               f"gyro=[{self.gyro_proc_x:>+8.3f}, {self.gyro_proc_y:>+8.3f}, {self.gyro_proc_z:>+8.3f}], "\
               f"gyro_t={self.gyro_proc_time:>6.3f}; " \
               f"accel=[{self.accel_proc_x:>+8.3f}, {self.accel_proc_y:>+8.3f}, {self.accel_proc_z:>+8.3f}], " \
               f"accel_t={self.accel_proc_time:>6.3f}; " \
               f"mag=[{self.mag_proc_x:>+8.6f}, {self.mag_proc_y:>+8.6f}, {self.mag_proc_z:>+8.6f}], " \
               f"mag_t={self.mag_proc_time:>6.3f})"
@dataclass
class UM7EulerPacket:
    """Broadcast packet with Euler angles and their rates.

    Angle units are not shown in this file (presumably degrees -- confirm
    against the UM7 datasheet); ``time_stamp`` is the on-board timestamp.
    """
    roll: float
    pitch: float
    yaw: float
    roll_rate: float
    pitch_rate: float
    yaw_rate: float
    time_stamp: float
    def __repr__(self):
        # Compact fixed-width one-line summary for log output.
        return f"EulerPacket("\
               f"roll={self.roll:>+8.3f}; pitch={self.pitch:>+8.3f}; yaw={self.yaw:>+8.3f}; "\
               f"roll_rate={self.roll_rate:>+8.3f}; pitch_rate={self.pitch_rate:>+8.3f}; yaw_rate={self.yaw_rate:>+8.3f}; " \
               f"time_stamp={self.time_stamp:>6.3f})"
@dataclass
class UM7HealthPacket:
    """Decoded view of the UM7 DREG_HEALTH register broadcast."""
    health: int  # raw 32-bit DREG_HEALTH register value

    def __repr__(self):
        # Bit layout per the UM7 datasheet DREG_HEALTH description:
        # SATS_USED [31:26], HDOP [25:16] (10 bits), SATS_IN_VIEW [15:10],
        # then individual status flag bits.
        # Fixes: HDOP mask was 0x7F (7 bits), truncating the 10-bit field;
        # raw_value width was 4 hex digits for a 32-bit register -> now 8.
        return f"HealthPacket("\
               f"raw_value=0x{self.health:08X} -> " \
               f"SATS_USED={(self.health >> 26) & 0x3F}, " \
               f"HDOP={(self.health >> 16) & 0x3FF}, " \
               f"SATS_IN_VIEW={(self.health >> 10) & 0x3F}, " \
               f"OVF={bool((self.health >> 8) & 0x01)}, " \
               f"MG_N={bool((self.health >> 5) & 0x01)}, " \
               f"ACC_N={bool((self.health >> 4) & 0x01)}, " \
               f"ACCEL={bool((self.health >> 3) & 0x01)}, "\
               f"GYRO={bool((self.health >> 2) & 0x01)}, " \
               f"MAG={bool((self.health >> 1) & 0x01)}, " \
               f"GPS={bool((self.health >> 0) & 0x01)})"
@dataclass
class UM7RawAccelPacket:
    """Broadcast packet: one raw accelerometer sample (integer counts)."""
    accel_raw_x: int
    accel_raw_y: int
    accel_raw_z: int
    accel_raw_time: float  # on-board timestamp (presumably seconds -- confirm)
@dataclass
class UM7RawGyroPacket:
    """Broadcast packet: one raw rate-gyro sample (integer counts)."""
    gyro_raw_x: int
    gyro_raw_y: int
    gyro_raw_z: int
    gyro_raw_time: float  # on-board timestamp (presumably seconds -- confirm)
@dataclass
class UM7RawMagPacket:
    """Broadcast packet: one raw magnetometer sample (integer counts)."""
    mag_raw_x: int
    mag_raw_y: int
    mag_raw_z: int
    mag_raw_time: float  # on-board timestamp (presumably seconds -- confirm)
@dataclass
class UM7TemperaturePacket:
    """Broadcast packet: one temperature reading."""
    temperature: float  # units not shown here (presumably deg C -- confirm)
    temperature_time: float  # on-board timestamp (presumably seconds -- confirm)
@dataclass
class UM7ProcAccelPacket:
    """Broadcast packet: one processed (calibrated) accelerometer sample."""
    accel_proc_x: float
    accel_proc_y: float
    accel_proc_z: float
    accel_proc_time: float  # on-board timestamp (presumably seconds -- confirm)
@dataclass
class UM7ProcGyroPacket:
    """Broadcast packet: one processed (calibrated) rate-gyro sample."""
    gyro_proc_x: float
    gyro_proc_y: float
    gyro_proc_z: float
    gyro_proc_time: float  # on-board timestamp (presumably seconds -- confirm)
@dataclass
class UM7ProcMagPacket:
    """Broadcast packet: one processed (calibrated) magnetometer sample."""
    mag_proc_x: float
    mag_proc_y: float
    mag_proc_z: float
    mag_proc_time: float  # on-board timestamp (presumably seconds -- confirm)
@dataclass
class UM7QuaternionPacket:
    """Broadcast packet: attitude quaternion (w, x, y, z) estimate."""
    q_w: float
    q_x: float
    q_y: float
    q_z: float
    q_time: float  # on-board timestamp (presumably seconds -- confirm)
@dataclass
class UM7EulerPosePacket:
    """Broadcast packet: Euler angles with rates plus NEU position estimate."""
    roll: float
    pitch: float
    yaw: float
    roll_rate: float
    pitch_rate: float
    yaw_rate: float
    euler_time: float  # timestamp of the Euler estimate
    position_north: float
    position_east: float
    position_up: float
    position_time: float  # timestamp of the position estimate
@dataclass
class UM7PosePacket:
    """Broadcast packet: position estimate in north/east/up coordinates."""
    position_north: float
    position_east: float
    position_up: float
    position_time: float  # on-board timestamp (presumably seconds -- confirm)
@dataclass
class UM7VelocityPacket:
    """Broadcast packet: velocity estimate in north/east/up coordinates."""
    velocity_north: float
    velocity_east: float
    velocity_up: float
    velocity_time: float  # on-board timestamp (presumably seconds -- confirm)
@dataclass
class UM7GyroBiasPacket:
    """Broadcast packet: estimated rate-gyro biases for the x/y/z axes."""
    gyro_bias_x: float
    gyro_bias_y: float
    gyro_bias_z: float
    def __repr__(self):
        # Compact fixed-width one-line summary for log output.
        return f"GyroBiasPacket("\
               f"gyro_bias=[{self.gyro_bias_x:>+8.3f}, {self.gyro_bias_y:>+8.3f}, {self.gyro_bias_z:>+8.3f}])"
if __name__ == '__main__':
pass | /rsl_comm_py-0.1.11.tar.gz/rsl_comm_py-0.1.11/rsl_comm_py/um7_broadcast_packets.py | 0.862757 | 0.436622 | um7_broadcast_packets.py | pypi |
# Author: Dr. Konstantin Selyunin
# License: MIT
# Created: 2022.03.28
import logging
import os.path
import struct
from abc import abstractmethod, ABC
from typing import Union, Tuple
from .rsl_xml_svd.rsl_svd_parser import RslSvdParser
class UM7Registers(ABC):
    """Abstract register map for the Redshift Labs UM7 orientation sensor.

    Concrete subclasses supply the transport (``connect``, ``read_register``,
    ``write_register``); the register properties defined on this class decode
    payloads using metadata parsed from the UM7 SVD description file.
    """
    def __init__(self, **kwargs):
        # Parse the SVD file once; register properties use it to look up
        # register and bit-field metadata by name.
        self.svd_parser = RslSvdParser(svd_file=UM7Registers.find_svd('um7.svd'))
@staticmethod
def find_svd(svd_file_name: str):
parent_dir = os.path.join(os.path.dirname(__file__), os.pardir)
for root, dirs, files in os.walk(parent_dir):
if svd_file_name in files:
return os.path.join(root, svd_file_name)
    @abstractmethod
    def connect(self, *args, **kwargs):
        """Open the underlying transport (serial port, etc.); subclass-defined."""
        pass
    @abstractmethod
    def read_register(self, reg_addr: int, **kw) -> bytes:
        # NOTE(review): annotation says ``bytes``, but every call site in this
        # class unpacks the result as ``ok, payload = self.read_register(...)``
        # -- implementations apparently return a (bool, bytes) pair; confirm.
        """Read the register at *reg_addr*; subclass-defined."""
        pass
    @abstractmethod
    def write_register(self, reg_addr: int, reg_value: Union[int, bytes, float], **kw):
        """Write *reg_value* to the register at *reg_addr*; subclass-defined."""
        pass
@property
def creg_com_settings(self):
"""
The CREG_COM_SETTINGS register is used to set the boards serial port baud rate and to enable (disable) the
automatic transmission of sensor data and estimated states (telemetry).
Payload structure:
[31:28] : BAUD_RATE -- Sets the baud rate of the boards main serial port:
[27:24] : GPS_BAUD -- Sets the baud rate of the UM7 auxiliary serial port:
[8] : GPS -- If set, this bit causes GPS data to be transmitted automatically whenever new GPS data is received. GPS data is stored in registers 125 to 130. These registers will be transmitted in a batch packet of length 6 starting at address 125.
[4] : SAT -- If set, this bit causes satellite details to be transmitted whenever they are provided by the GPS. Satellite information is stored in registers 131 to 136. These registers will be transmitted in a batch packet of length 6 beginning at address 131.
:return: BAUD_RATE as bitField; GPS_BAUD as bitField; GPS as bitField; SAT as bitField;
"""
addr = 0x00
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_SETTINGS')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for BAUD_RATE bit field
baud_rate_val = (reg.raw_value >> 28) & 0x000F
baud_rate_enum = reg.find_field_by(name='BAUD_RATE').find_enum_entry_by(value=baud_rate_val)
# find value for GPS_BAUD bit field
gps_baud_val = (reg.raw_value >> 24) & 0x000F
gps_baud_enum = reg.find_field_by(name='GPS_BAUD').find_enum_entry_by(value=gps_baud_val)
# find value for GPS bit field
gps_val = (reg.raw_value >> 8) & 0x0001
gps_enum = reg.find_field_by(name='GPS').find_enum_entry_by(value=gps_val)
# find value for SAT bit field
sat_val = (reg.raw_value >> 4) & 0x0001
sat_enum = reg.find_field_by(name='SAT').find_enum_entry_by(value=sat_val)
return reg, baud_rate_enum, gps_baud_enum, gps_enum, sat_enum
@creg_com_settings.setter
def creg_com_settings(self, new_value):
addr = 0x00
self.write_register(addr, new_value)
@property
def creg_com_rates1(self):
"""
The CREG_COM_RATES1 register sets desired telemetry transmission rates in Hz for raw accelerometer, gyro, and
magnetometer data. If the specified rate is 0, then no data is transmitted.
Payload structure:
[31:24] : RAW_ACCEL_RATE -- Specifies the desired raw accelerometer data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[23:16] : RAW_GYRO_RATE -- Specifies the desired raw gyro data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[15:8] : RAW_MAG_RATE -- Specifies the desired raw magnetometer data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: RAW_ACCEL_RATE as uint8_t; RAW_GYRO_RATE as uint8_t; RAW_MAG_RATE as uint8_t;
"""
addr = 0x01
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES1')
reg.raw_value, = struct.unpack('>I', payload[0:4])
raw_accel_rate, raw_gyro_rate, raw_mag_rate = struct.unpack('>BBBx', payload[0:4])
return reg, raw_accel_rate, raw_gyro_rate, raw_mag_rate
@creg_com_rates1.setter
def creg_com_rates1(self, new_value):
addr = 0x01
self.write_register(addr, new_value)
@property
def creg_com_rates2(self):
"""
The CREG_COM_RATES2 register sets desired telemetry transmission rates for all raw data and temperature. If
the specified rate is 0, then no data is transmitted.
Payload structure:
[31:24] : TEMP_RATE -- Specifies the desired broadcast rate for temperature data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : ALL_RAW_RATE -- Specifies the desired broadcast rate for all raw sensor data. If set, this overrides the broadcast rate setting for individual raw data broadcast rates. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: TEMP_RATE as uint8_t; ALL_RAW_RATE as uint8_t;
"""
addr = 0x02
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES2')
reg.raw_value, = struct.unpack('>I', payload[0:4])
temp_rate, all_raw_rate = struct.unpack('>BxxB', payload[0:4])
return reg, temp_rate, all_raw_rate
@creg_com_rates2.setter
def creg_com_rates2(self, new_value):
addr = 0x02
self.write_register(addr, new_value)
@property
def creg_com_rates3(self):
"""
The CREG_COM_RATES3 register sets desired telemetry transmission rates for processed sensor data. If the
specified rate is 0, then no data is transmitted.
Payload structure:
[31:24] : PROC_ACCEL_RATE -- Specifies the desired broadcast rate for processed accelerometer data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[23:16] : PROC_GYRO_RATE -- Specifies the desired broadcast rate for processed rate gyro data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[15:8] : PROC_MAG_RATE -- Specifies the desired broadcast rate for processed magnetometer data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: PROC_ACCEL_RATE as uint8_t; PROC_GYRO_RATE as uint8_t; PROC_MAG_RATE as uint8_t;
"""
addr = 0x03
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES3')
reg.raw_value, = struct.unpack('>I', payload[0:4])
proc_accel_rate, proc_gyro_rate, proc_mag_rate = struct.unpack('>BBBx', payload[0:4])
return reg, proc_accel_rate, proc_gyro_rate, proc_mag_rate
@creg_com_rates3.setter
def creg_com_rates3(self, new_value):
addr = 0x03
self.write_register(addr, new_value)
@property
def creg_com_rates4(self):
"""
The CREG_COM_RATES4 register defines the desired telemetry transmission rates for all processed data. If the
specified rate is 0, then no data is transmitted.
Payload structure:
[7:0] : ALL_PROC_RATE -- Specifies the desired broadcast rate for raw all processed data. If set, this overrides the broadcast rate setting for individual processed data broadcast rates. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: ALL_PROC_RATE as uint8_t;
"""
addr = 0x04
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES4')
reg.raw_value, = struct.unpack('>xxxB', payload[0:4])
all_proc_rate, = struct.unpack('>xxxB', payload[0:4])
return reg, all_proc_rate,
@creg_com_rates4.setter
def creg_com_rates4(self, new_value):
addr = 0x04
self.write_register(addr, new_value)
@property
def creg_com_rates5(self):
"""
The CREG_COM_RATES5 register sets desired telemetry transmission rates for quaternions, Euler Angles,
position, and velocity estimates. If the specified rate is 0, then no data is transmitted.
Payload structure:
[31:24] : QUAT_RATE -- Specifies the desired broadcast rate for quaternion data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[23:16] : EULER_RATE -- Specifies the desired broadcast rate for Euler Angle data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[15:8] : POSITION_RATE -- Specifies the desired broadcast rate position. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : VELOCITY_RATE -- Specifies the desired broadcast rate for velocity. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: QUAT_RATE as uint8_t; EULER_RATE as uint8_t; POSITION_RATE as uint8_t; VELOCITY_RATE as uint8_t;
"""
addr = 0x05
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES5')
reg.raw_value, = struct.unpack('>I', payload[0:4])
quat_rate, euler_rate, position_rate, velocity_rate = struct.unpack('>BBBB', payload[0:4])
return reg, quat_rate, euler_rate, position_rate, velocity_rate
@creg_com_rates5.setter
def creg_com_rates5(self, new_value):
addr = 0x05
self.write_register(addr, new_value)
@property
def creg_com_rates6(self):
"""
The CREG_COM_RATES6 register sets desired telemetry transmission rates for pose (Euler/position packet),
health, and gyro bias estimates for the gyro 1 and gyro 2. If the specified rate is 0, then no data is
transmitted.
Payload structure:
[31:24] : POSE_RATE -- Specifies the desired broadcast rate for pose (Euler Angle and position) data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[19:16] : HEALTH_RATE -- Specifies the desired broadcast rate for the sensor health packet.
[15:8] : GYRO_BIAS_RATE -- Specifies the desired broadcast rate for gyro bias estimates. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: POSE_RATE as uint8_t; HEALTH_RATE as bitField; GYRO_BIAS_RATE as uint8_t;
"""
addr = 0x06
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES6')
reg.raw_value, = struct.unpack('>I', payload[0:4])
pose_rate, gyro_bias_rate = struct.unpack('>BxBx', payload[0:4])
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for HEALTH_RATE bit field
health_rate_val = (reg.raw_value >> 16) & 0x000F
health_rate_enum = reg.find_field_by(name='HEALTH_RATE').find_enum_entry_by(value=health_rate_val)
return reg, pose_rate, gyro_bias_rate, reg, health_rate_enum
@creg_com_rates6.setter
def creg_com_rates6(self, new_value):
addr = 0x06
self.write_register(addr, new_value)
@property
def creg_com_rates7(self):
"""
The CREG_COM_RATES7 register sets desired telemetry transmission rates in Hz for NMEA packets.
Payload structure:
[31:28] : NMEA_HEALTH_RATE -- Specifies the desired broadcast rate for Redshift Labs Pty Ltd NMEA-style health packet.
[27:24] : NMEA_POSE_RATE -- Specifies the desired broadcast rate for Redshift Labs Pty Ltd NMEA-style pose (Euler Angle/position) packet.
[23:20] : NMEA_ATTITUDE_RATE -- Specifies the desired broadcast rate for Redshift Labs Pty Ltd NMEA-style attitude packet.
[19:16] : NMEA_SENSOR_RATE -- Specifies the desired broadcast rate for Redshift Labs Pty Ltd NMEA-style sensor data packet.
[15:12] : NMEA_RATES_RATE -- Specifies the desired broadcast rate for Redshift Labs Pty Ltd NMEA-style rate data packet.
[11:8] : NMEA_GPS_POSE_RATE -- Specifies the desired broadcast rate for Redshift Labs Pty Ltd NMEA-style GPS pose packet.
[7:4] : NMEA_QUAT_RATE -- Specifies the desired broadcast rate for Redshift Labs Pty Ltd NMEA-style quaternion packet.
:return: NMEA_HEALTH_RATE as bitField; NMEA_POSE_RATE as bitField; NMEA_ATTITUDE_RATE as bitField; NMEA_SENSOR_RATE as bitField; NMEA_RATES_RATE as bitField; NMEA_GPS_POSE_RATE as bitField; NMEA_QUAT_RATE as bitField;
"""
addr = 0x07
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES7')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for NMEA_HEALTH_RATE bit field
nmea_health_rate_val = (reg.raw_value >> 28) & 0x000F
nmea_health_rate_enum = reg.find_field_by(name='NMEA_HEALTH_RATE').find_enum_entry_by(value=nmea_health_rate_val)
# find value for NMEA_POSE_RATE bit field
nmea_pose_rate_val = (reg.raw_value >> 24) & 0x000F
nmea_pose_rate_enum = reg.find_field_by(name='NMEA_POSE_RATE').find_enum_entry_by(value=nmea_pose_rate_val)
# find value for NMEA_ATTITUDE_RATE bit field
nmea_attitude_rate_val = (reg.raw_value >> 20) & 0x000F
nmea_attitude_rate_enum = reg.find_field_by(name='NMEA_ATTITUDE_RATE').find_enum_entry_by(value=nmea_attitude_rate_val)
# find value for NMEA_SENSOR_RATE bit field
nmea_sensor_rate_val = (reg.raw_value >> 16) & 0x000F
nmea_sensor_rate_enum = reg.find_field_by(name='NMEA_SENSOR_RATE').find_enum_entry_by(value=nmea_sensor_rate_val)
# find value for NMEA_RATES_RATE bit field
nmea_rates_rate_val = (reg.raw_value >> 12) & 0x000F
nmea_rates_rate_enum = reg.find_field_by(name='NMEA_RATES_RATE').find_enum_entry_by(value=nmea_rates_rate_val)
# find value for NMEA_GPS_POSE_RATE bit field
nmea_gps_pose_rate_val = (reg.raw_value >> 8) & 0x000F
nmea_gps_pose_rate_enum = reg.find_field_by(name='NMEA_GPS_POSE_RATE').find_enum_entry_by(value=nmea_gps_pose_rate_val)
# find value for NMEA_QUAT_RATE bit field
nmea_quat_rate_val = (reg.raw_value >> 4) & 0x000F
nmea_quat_rate_enum = reg.find_field_by(name='NMEA_QUAT_RATE').find_enum_entry_by(value=nmea_quat_rate_val)
return reg, nmea_health_rate_enum, nmea_pose_rate_enum, nmea_attitude_rate_enum, nmea_sensor_rate_enum, nmea_rates_rate_enum, nmea_gps_pose_rate_enum, nmea_quat_rate_enum
@creg_com_rates7.setter
def creg_com_rates7(self, new_value):
addr = 0x07
self.write_register(addr, new_value)
@property
def creg_misc_settings(self):
"""
This register contains miscellaneous filter and sensor control options.
Payload structure:
[8] : PPS -- If set, this bit causes the TX2 pin on the IO Expansion header to be used as the PPS input from an external GPS module. PPS pulses will then be used to synchronize the system clock to UTC time of day.
[2] : ZG -- If set, this bit causes the device to attempt to measure the rate gyro bias on startup. The sensor must be stationary on startup for this feature to work properly.
[1] : Q -- If this bit is set, the sensor will run in quaternion mode instead of Euler Angle mode.
[0] : MAG -- If set, the magnetometer will be used in state updates.
:return: PPS as bitField; ZG as bitField; Q as bitField; MAG as bitField;
"""
addr = 0x08
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MISC_SETTINGS')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for PPS bit field
pps_val = (reg.raw_value >> 8) & 0x0001
pps_enum = reg.find_field_by(name='PPS').find_enum_entry_by(value=pps_val)
# find value for ZG bit field
zg_val = (reg.raw_value >> 2) & 0x0001
zg_enum = reg.find_field_by(name='ZG').find_enum_entry_by(value=zg_val)
# find value for Q bit field
q_val = (reg.raw_value >> 1) & 0x0001
q_enum = reg.find_field_by(name='Q').find_enum_entry_by(value=q_val)
# find value for MAG bit field
mag_val = (reg.raw_value >> 0) & 0x0001
mag_enum = reg.find_field_by(name='MAG').find_enum_entry_by(value=mag_val)
return reg, pps_enum, zg_enum, q_enum, mag_enum
@creg_misc_settings.setter
def creg_misc_settings(self, new_value):
addr = 0x08
self.write_register(addr, new_value)
@property
def creg_home_north(self):
"""
This register sets the north home latitude in degrees, used to convert GPS coordinates to position in meters
from home.
Payload structure:
[31:0] : SET_HOME_NORTH -- North Position (32-bit IEEE Floating Point Value)
:return: SET_HOME_NORTH as float;
"""
addr = 0x09
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_HOME_NORTH')
reg.raw_value, = struct.unpack('>f', payload[0:4])
set_home_north, = struct.unpack('>f', payload[0:4])
return reg, set_home_north,
@creg_home_north.setter
def creg_home_north(self, new_value):
addr = 0x09
self.write_register(addr, new_value)
@property
def creg_home_east(self):
"""
This register sets the east home longitude in degrees, used to convert GPS coordinates to position in meters
from home.
Payload structure:
[31:0] : SET_HOME_EAST -- East Position (32-bit IEEE Floating Point Value)
:return: SET_HOME_EAST as float;
"""
addr = 0x0A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_HOME_EAST')
reg.raw_value, = struct.unpack('>f', payload[0:4])
set_home_east, = struct.unpack('>f', payload[0:4])
return reg, set_home_east,
@creg_home_east.setter
def creg_home_east(self, new_value):
addr = 0x0A
self.write_register(addr, new_value)
@property
def creg_home_up(self):
"""
This register sets the home altitude in meters. Used to convert GPS coordinates to position in meters from
home.
Payload structure:
[31:0] : SET_HOME_UP -- Altitude Position (32-bit IEEE Floating Point Value)
:return: SET_HOME_UP as float;
"""
addr = 0x0B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_HOME_UP')
reg.raw_value, = struct.unpack('>f', payload[0:4])
set_home_up, = struct.unpack('>f', payload[0:4])
return reg, set_home_up,
@creg_home_up.setter
def creg_home_up(self, new_value):
addr = 0x0B
self.write_register(addr, new_value)
@property
def creg_gyro_trim_x(self):
"""
This register sets the x-axis rate gyro trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_TRIM_X -- 32-bit IEEE Floating Point Value
:return: GYRO_TRIM_X as float;
"""
addr = 0x0C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_TRIM_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_trim_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_trim_x,
@creg_gyro_trim_x.setter
def creg_gyro_trim_x(self, new_value):
addr = 0x0C
self.write_register(addr, new_value)
@property
def creg_gyro_trim_y(self):
"""
This register sets the y-axis rate gyro trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_TRIM_Y -- 32-bit IEEE Floating Point Value
:return: GYRO_TRIM_Y as float;
"""
addr = 0x0D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_TRIM_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_trim_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_trim_y,
@creg_gyro_trim_y.setter
def creg_gyro_trim_y(self, new_value):
addr = 0x0D
self.write_register(addr, new_value)
@property
def creg_gyro_trim_z(self):
"""
This register sets the z-axis rate gyro trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_TRIM_Z -- 32-bit IEEE Floating Point Value
:return: GYRO_TRIM_Z as float;
"""
addr = 0x0E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_TRIM_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_trim_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_trim_z,
@creg_gyro_trim_z.setter
def creg_gyro_trim_z(self, new_value):
addr = 0x0E
self.write_register(addr, new_value)
@property
def creg_mag_cal1_1(self):
"""
Row 1, Column 1 of magnetometer calibration matrix.
Payload structure:
[31:0] : MAG_CAL1_1 -- 32-bit IEEE Floating Point Value
:return: MAG_CAL1_1 as float;
"""
addr = 0x0F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_CAL1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_cal1_1, = struct.unpack('>f', payload[0:4])
return reg, mag_cal1_1,
@creg_mag_cal1_1.setter
def creg_mag_cal1_1(self, new_value):
addr = 0x0F
self.write_register(addr, new_value)
@property
def creg_mag_cal1_2(self):
"""
Row 1, Column 2 of magnetometer calibration matrix.
Payload structure:
[31:0] : MAG_CAL1_2 -- 32-bit IEEE Floating Point Value
:return: MAG_CAL1_2 as float;
"""
addr = 0x10
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_CAL1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_cal1_2, = struct.unpack('>f', payload[0:4])
return reg, mag_cal1_2,
@creg_mag_cal1_2.setter
def creg_mag_cal1_2(self, new_value):
addr = 0x10
self.write_register(addr, new_value)
@property
def creg_mag_cal1_3(self):
"""
Row 1, Column 3 of magnetometer calibration matrix.
Payload structure:
[31:0] : MAG_CAL1_3 -- 32-bit IEEE Floating Point Value
:return: MAG_CAL1_3 as float;
"""
addr = 0x11
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_CAL1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_cal1_3, = struct.unpack('>f', payload[0:4])
return reg, mag_cal1_3,
@creg_mag_cal1_3.setter
def creg_mag_cal1_3(self, new_value):
addr = 0x11
self.write_register(addr, new_value)
@property
def creg_mag_cal2_1(self):
"""
Row 2, Column 1 of magnetometer calibration matrix.
Payload structure:
[31:0] : MAG_CAL2_1 -- 32-bit IEEE Floating Point Value
:return: MAG_CAL2_1 as float;
"""
addr = 0x12
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_CAL2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_cal2_1, = struct.unpack('>f', payload[0:4])
return reg, mag_cal2_1,
@creg_mag_cal2_1.setter
def creg_mag_cal2_1(self, new_value):
addr = 0x12
self.write_register(addr, new_value)
@property
def creg_mag_cal2_2(self):
"""
Row 2, Column 2 of magnetometer calibration matrix.
Payload structure:
[31:0] : MAG_CAL2_2 -- 32-bit IEEE Floating Point Value
:return: MAG_CAL2_2 as float;
"""
addr = 0x13
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_CAL2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_cal2_2, = struct.unpack('>f', payload[0:4])
return reg, mag_cal2_2,
@creg_mag_cal2_2.setter
def creg_mag_cal2_2(self, new_value):
addr = 0x13
self.write_register(addr, new_value)
@property
def creg_mag_cal2_3(self):
"""
Row 2, Column 3 of magnetometer calibration matrix.
Payload structure:
[31:0] : MAG_CAL2_3 -- 32-bit IEEE Floating Point Value
:return: MAG_CAL2_3 as float;
"""
addr = 0x14
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_CAL2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_cal2_3, = struct.unpack('>f', payload[0:4])
return reg, mag_cal2_3,
@creg_mag_cal2_3.setter
def creg_mag_cal2_3(self, new_value):
addr = 0x14
self.write_register(addr, new_value)
@property
def creg_mag_cal3_1(self):
"""
Row 3, Column 1 of magnetometer calibration matrix.
Payload structure:
[31:0] : MAG_CAL3_1 -- 32-bit IEEE Floating Point Value
:return: MAG_CAL3_1 as float;
"""
addr = 0x15
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_CAL3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_cal3_1, = struct.unpack('>f', payload[0:4])
return reg, mag_cal3_1,
@creg_mag_cal3_1.setter
def creg_mag_cal3_1(self, new_value):
addr = 0x15
self.write_register(addr, new_value)
@property
def creg_mag_cal3_2(self):
"""
Row 3, Column 2 of magnetometer calibration matrix.
Payload structure:
[31:0] : MAG_CAL3_2 -- 32-bit IEEE Floating Point Value
:return: MAG_CAL3_2 as float;
"""
addr = 0x16
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_CAL3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_cal3_2, = struct.unpack('>f', payload[0:4])
return reg, mag_cal3_2,
@creg_mag_cal3_2.setter
def creg_mag_cal3_2(self, new_value):
addr = 0x16
self.write_register(addr, new_value)
@property
def creg_mag_cal3_3(self):
"""
Row 3, Column 3 of magnetometer calibration matrix.
Payload structure:
[31:0] : MAG_CAL3_3 -- 32-bit IEEE Floating Point Value
:return: MAG_CAL3_3 as float;
"""
addr = 0x17
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_CAL3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_cal3_3, = struct.unpack('>f', payload[0:4])
return reg, mag_cal3_3,
@creg_mag_cal3_3.setter
def creg_mag_cal3_3(self, new_value):
addr = 0x17
self.write_register(addr, new_value)
@property
def creg_mag_bias_x(self):
"""
This register stores a bias term for the magnetometer x-axis for hard-iron calibration. This term can be
computed by performing magnetometer calibration with the Redshift labs Serial Interface.
Payload structure:
[31:0] : MAG_BIAS_X -- 32-bit IEEE Floating Point Value
:return: MAG_BIAS_X as float;
"""
addr = 0x18
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_bias_x, = struct.unpack('>f', payload[0:4])
return reg, mag_bias_x,
@creg_mag_bias_x.setter
def creg_mag_bias_x(self, new_value):
addr = 0x18
self.write_register(addr, new_value)
@property
def creg_mag_bias_y(self):
"""
This register stores a bias term for the magnetometer y-axis for hard-iron calibration. This term can be
computed by performing magnetometer calibration with the Redshift labs Serial Interface.
Payload structure:
[31:0] : MAG_BIAS_Y -- 32-bit IEEE Floating Point Value
:return: MAG_BIAS_Y as float;
"""
addr = 0x19
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_bias_y, = struct.unpack('>f', payload[0:4])
return reg, mag_bias_y,
@creg_mag_bias_y.setter
def creg_mag_bias_y(self, new_value):
addr = 0x19
self.write_register(addr, new_value)
@property
def creg_mag_bias_z(self):
"""
This register stores a bias term for the magnetometer z-axis for hard-iron calibration. This term can be
computed by performing magnetometer calibration with the Redshift labs Serial Interface.
Payload structure:
[31:0] : MAG_BIAS_Z -- 32-bit IEEE Floating Point Value
:return: MAG_BIAS_Z as float;
"""
addr = 0x1A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_bias_z, = struct.unpack('>f', payload[0:4])
return reg, mag_bias_z,
@creg_mag_bias_z.setter
def creg_mag_bias_z(self, new_value):
addr = 0x1A
self.write_register(addr, new_value)
@property
def creg_accel_cal1_1(self):
"""
Row 1, Column 1 of accelerometer calibration matrix.
Payload structure:
[31:0] : ACCEL_CAL1_1 -- 32-bit IEEE Floating Point Value
:return: ACCEL_CAL1_1 as float;
"""
addr = 0x1B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_CAL1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_cal1_1, = struct.unpack('>f', payload[0:4])
return reg, accel_cal1_1,
@creg_accel_cal1_1.setter
def creg_accel_cal1_1(self, new_value):
addr = 0x1B
self.write_register(addr, new_value)
@property
def creg_accel_cal1_2(self):
"""
Row 1, Column 2 of accelerometer calibration matrix.
Payload structure:
[31:0] : ACCEL_CAL1_2 -- 32-bit IEEE Floating Point Value
:return: ACCEL_CAL1_2 as float;
"""
addr = 0x1C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_CAL1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_cal1_2, = struct.unpack('>f', payload[0:4])
return reg, accel_cal1_2,
@creg_accel_cal1_2.setter
def creg_accel_cal1_2(self, new_value):
addr = 0x1C
self.write_register(addr, new_value)
@property
def creg_accel_cal1_3(self):
"""
Row 1, Column 3 of accelerometer calibration matrix.
Payload structure:
[31:0] : ACCEL_CAL1_3 -- 32-bit IEEE Floating Point Value
:return: ACCEL_CAL1_3 as float;
"""
addr = 0x1D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_CAL1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_cal1_3, = struct.unpack('>f', payload[0:4])
return reg, accel_cal1_3,
@creg_accel_cal1_3.setter
def creg_accel_cal1_3(self, new_value):
addr = 0x1D
self.write_register(addr, new_value)
@property
def creg_accel_cal2_1(self):
"""
Row 2, Column 1 of accelerometer calibration matrix.
Payload structure:
[31:0] : ACCEL_CAL2_1 -- 32-bit IEEE Floating Point Value
:return: ACCEL_CAL2_1 as float;
"""
addr = 0x1E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_CAL2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_cal2_1, = struct.unpack('>f', payload[0:4])
return reg, accel_cal2_1,
@creg_accel_cal2_1.setter
def creg_accel_cal2_1(self, new_value):
addr = 0x1E
self.write_register(addr, new_value)
@property
def creg_accel_cal2_2(self):
"""
Row 2, Column 2 of accelerometer calibration matrix.
Payload structure:
[31:0] : ACCEL_CAL2_2 -- 32-bit IEEE Floating Point Value
:return: ACCEL_CAL2_2 as float;
"""
addr = 0x1F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_CAL2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_cal2_2, = struct.unpack('>f', payload[0:4])
return reg, accel_cal2_2,
@creg_accel_cal2_2.setter
def creg_accel_cal2_2(self, new_value):
addr = 0x1F
self.write_register(addr, new_value)
@property
def creg_accel_cal2_3(self):
"""
Row 2, Column 3 of accelerometer calibration matrix.
Payload structure:
[31:0] : ACCEL_CAL2_3 -- 32-bit IEEE Floating Point Value
:return: ACCEL_CAL2_3 as float;
"""
addr = 0x20
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_CAL2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_cal2_3, = struct.unpack('>f', payload[0:4])
return reg, accel_cal2_3,
@creg_accel_cal2_3.setter
def creg_accel_cal2_3(self, new_value):
addr = 0x20
self.write_register(addr, new_value)
@property
def creg_accel_cal3_1(self):
"""
Row 3, Column 1 of accelerometer calibration matrix.
Payload structure:
[31:0] : ACCEL_CAL3_1 -- 32-bit IEEE Floating Point Value
:return: ACCEL_CAL3_1 as float;
"""
addr = 0x21
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_CAL3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_cal3_1, = struct.unpack('>f', payload[0:4])
return reg, accel_cal3_1,
@creg_accel_cal3_1.setter
def creg_accel_cal3_1(self, new_value):
addr = 0x21
self.write_register(addr, new_value)
@property
def creg_accel_cal3_2(self):
"""
Row 3, Column 2 of accelerometer calibration matrix.
Payload structure:
[31:0] : ACCEL_CAL3_2 -- 32-bit IEEE Floating Point Value
:return: ACCEL_CAL3_2 as float;
"""
addr = 0x22
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_CAL3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_cal3_2, = struct.unpack('>f', payload[0:4])
return reg, accel_cal3_2,
@creg_accel_cal3_2.setter
def creg_accel_cal3_2(self, new_value):
addr = 0x22
self.write_register(addr, new_value)
@property
def creg_accel_cal3_3(self):
"""
Row 3, Column 3 of accelerometer calibration matrix.
Payload structure:
[31:0] : ACCEL_CAL3_3 -- 32-bit IEEE Floating Point Value
:return: ACCEL_CAL3_3 as float;
"""
addr = 0x23
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_CAL3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_cal3_3, = struct.unpack('>f', payload[0:4])
return reg, accel_cal3_3,
@creg_accel_cal3_3.setter
def creg_accel_cal3_3(self, new_value):
addr = 0x23
self.write_register(addr, new_value)
@property
def creg_accel_bias_x(self):
"""
This register stores a bias term for the accelerometer x-axis for bias calibration. This term can be computed
by performing calibrate accelerometers command within the Redshift labs Serial Interface.
Payload structure:
[31:0] : ACCEL_BIAS_X -- 32-bit IEEE Floating Point Value
:return: ACCEL_BIAS_X as float;
"""
addr = 0x24
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_bias_x, = struct.unpack('>f', payload[0:4])
return reg, accel_bias_x,
@creg_accel_bias_x.setter
def creg_accel_bias_x(self, new_value):
addr = 0x24
self.write_register(addr, new_value)
@property
def creg_accel_bias_y(self):
"""
This register stores a bias term for the accelerometer y-axis for bias calibration. This term can be computed
by performing calibrate accelerometers command within the Redshift labs Serial Interface.
Payload structure:
[31:0] : ACCEL_BIAS_Y -- 32-bit IEEE Floating Point Value
:return: ACCEL_BIAS_Y as float;
"""
addr = 0x25
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_bias_y, = struct.unpack('>f', payload[0:4])
return reg, accel_bias_y,
@creg_accel_bias_y.setter
def creg_accel_bias_y(self, new_value):
addr = 0x25
self.write_register(addr, new_value)
@property
def creg_accel_bias_z(self):
"""
This register stores a bias term for the accelerometer z-axis for bias calibration. This term can be computed
by performing calibrate accelerometers command within the Redshift labs Serial Interface.
Payload structure:
[31:0] : ACCEL_BIAS_Z -- 32-bit IEEE Floating Point Value
:return: ACCEL_BIAS_Z as float;
"""
addr = 0x26
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_bias_z, = struct.unpack('>f', payload[0:4])
return reg, accel_bias_z,
@creg_accel_bias_z.setter
def creg_accel_bias_z(self, new_value):
addr = 0x26
self.write_register(addr, new_value)
@property
def dreg_health(self):
"""
The health register reports the current status of the GPS module and the other sensors on the board.
Monitoring the health register is the easiest way to monitor the quality of the GPS lock and to watch for
other problems that could affect the behavior of the board.
Payload structure:
[31:26] : SATS_USED -- Reports the number of satellites used in the position solution.
[25:16] : HDOP -- Reports the horizontal dilution of precision (HDOP) reported by the GPS. The actual HDOP value is equal to the contents of the HDOP bits divided by 10.
[15:10] : SATS_IN_VIEW -- Reports the number of satellites in view.
[8] : OVF -- Overflow bit. This bit is set if the UM7 is attempting to transmit data over the serial port faster than is allowed given the baud-rate. If this bit is set, reduce broadcast rates in the COM_RATES registers.
[5] : MG_N -- This bit is set if the sensor detects that the norm of the magnetometer measurement is too far away from 1.0 to be trusted. Usually indicates bad calibration, local field distortions, or both.
[4] : ACC_N -- This bit is set if the sensor detects that the norm of the accelerometer measurement is too far away from 1G to be used (i.e. during aggressive acceleration or high vibration).
[3] : ACCEL -- This bit will be set if the accelerometer fails to initialize on startup.
[2] : GYRO -- This bit will be set if the rate gyro fails to initialize on startup.
[1] : MAG -- This bit will be set if the magnetometer fails to initialize on startup.
[0] : GPS -- This bit is set if the GPS fails to send a packet for more than two seconds. If a GPS packet is ever received, this bit is cleared.
:return: SATS_USED as bitField; HDOP as bitField; SATS_IN_VIEW as bitField; OVF as bitField; MG_N as bitField; ACC_N as bitField; ACCEL as bitField; GYRO as bitField; MAG as bitField; GPS as bitField;
"""
addr = 0x55
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_HEALTH')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for SATS_USED bit field
sats_used_val = (reg.raw_value >> 26) & 0x003F
sats_used_enum = reg.find_field_by(name='SATS_USED').find_enum_entry_by(value=sats_used_val)
# find value for HDOP bit field
hdop_val = (reg.raw_value >> 16) & 0x03FF
hdop_enum = reg.find_field_by(name='HDOP').find_enum_entry_by(value=hdop_val)
# find value for SATS_IN_VIEW bit field
sats_in_view_val = (reg.raw_value >> 10) & 0x003F
sats_in_view_enum = reg.find_field_by(name='SATS_IN_VIEW').find_enum_entry_by(value=sats_in_view_val)
# find value for OVF bit field
ovf_val = (reg.raw_value >> 8) & 0x0001
ovf_enum = reg.find_field_by(name='OVF').find_enum_entry_by(value=ovf_val)
# find value for MG_N bit field
mg_n_val = (reg.raw_value >> 5) & 0x0001
mg_n_enum = reg.find_field_by(name='MG_N').find_enum_entry_by(value=mg_n_val)
# find value for ACC_N bit field
acc_n_val = (reg.raw_value >> 4) & 0x0001
acc_n_enum = reg.find_field_by(name='ACC_N').find_enum_entry_by(value=acc_n_val)
# find value for ACCEL bit field
accel_val = (reg.raw_value >> 3) & 0x0001
accel_enum = reg.find_field_by(name='ACCEL').find_enum_entry_by(value=accel_val)
# find value for GYRO bit field
gyro_val = (reg.raw_value >> 2) & 0x0001
gyro_enum = reg.find_field_by(name='GYRO').find_enum_entry_by(value=gyro_val)
# find value for MAG bit field
mag_val = (reg.raw_value >> 1) & 0x0001
mag_enum = reg.find_field_by(name='MAG').find_enum_entry_by(value=mag_val)
# find value for GPS bit field
gps_val = (reg.raw_value >> 0) & 0x0001
gps_enum = reg.find_field_by(name='GPS').find_enum_entry_by(value=gps_val)
return reg, sats_used_enum, hdop_enum, sats_in_view_enum, ovf_enum, mg_n_enum, acc_n_enum, accel_enum, gyro_enum, mag_enum, gps_enum
@property
def dreg_gyro_raw_xy(self):
"""
Contains raw X and Y axis rate gyro data.
Payload structure:
[31:16] : GYRO_RAW_X -- Gyro X (2s complement 16-bit integer)
[15:0] : GYRO_RAW_Y -- Gyro Y (2s complement 16-bit integer)
:return: GYRO_RAW_X as int16_t; GYRO_RAW_Y as int16_t;
"""
addr = 0x56
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_RAW_XY')
reg.raw_value, = struct.unpack('>I', payload[0:4])
gyro_raw_x, gyro_raw_y = struct.unpack('>hh', payload[0:4])
return reg, gyro_raw_x, gyro_raw_y
@property
def dreg_gyro_raw_z(self):
"""
Contains raw Z axis rate gyro data.
Payload structure:
[31:16] : GYRO_RAW_Z -- Gyro Z (2s complement 16-bit integer)
:return: GYRO_RAW_Z as int16_t;
"""
addr = 0x57
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_RAW_Z')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
gyro_raw_z, = struct.unpack('>hxx', payload[0:4])
return reg, gyro_raw_z,
@property
def dreg_gyro_raw_time(self):
"""
Contains time at which the last rate gyro data was acquired.
Payload structure:
[31:0] : GYRO_RAW_TIME -- 32-bit IEEE Floating Point Value
:return: GYRO_RAW_TIME as float;
"""
addr = 0x58
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_raw_time, = struct.unpack('>f', payload[0:4])
return reg, gyro_raw_time,
@property
def dreg_accel_raw_xy(self):
"""
Contains raw X and Y axis accelerometer data.
Payload structure:
[31:16] : ACCEL_RAW_X -- Accel X (2s complement 16-bit integer)
[15:0] : ACCEL_RAW_Y -- Accel Y (2s complement 16-bit integer)
:return: ACCEL_RAW_X as int16_t; ACCEL_RAW_Y as int16_t;
"""
addr = 0x59
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_RAW_XY')
reg.raw_value, = struct.unpack('>I', payload[0:4])
accel_raw_x, accel_raw_y = struct.unpack('>hh', payload[0:4])
return reg, accel_raw_x, accel_raw_y
@property
def dreg_accel_raw_z(self):
"""
Contains raw Z axis accelerometer data.
Payload structure:
[31:16] : ACCEL_RAW_Z -- Accel Z (2s complement 16-bit integer)
:return: ACCEL_RAW_Z as int16_t;
"""
addr = 0x5A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_RAW_Z')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
accel_raw_z, = struct.unpack('>hxx', payload[0:4])
return reg, accel_raw_z,
@property
def dreg_accel_raw_time(self):
"""
Contains time at which the last raw data sample for the accelerometer was acquired.
Payload structure:
[31:0] : ACCEL_RAW_TIME -- 32-bit IEEE Floating Point Value
:return: ACCEL_RAW_TIME as float;
"""
addr = 0x5B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_raw_time, = struct.unpack('>f', payload[0:4])
return reg, accel_raw_time,
@property
def dreg_mag_raw_xy(self):
"""
Contains raw X and Y axis magnetometer data.
Payload structure:
[31:16] : MAG_RAW_X -- Magnetometer X (2s complement 16-bit integer)
[15:0] : MAG_RAW_Y -- Magnetometer Y (2s complement 16-bit integer)
:return: MAG_RAW_X as int16_t; MAG_RAW_Y as int16_t;
"""
addr = 0x5C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_RAW_XY')
reg.raw_value, = struct.unpack('>I', payload[0:4])
mag_raw_x, mag_raw_y = struct.unpack('>hh', payload[0:4])
return reg, mag_raw_x, mag_raw_y
@property
def dreg_mag_raw_z(self):
"""
Contains raw Z axis magnetometer data.
Payload structure:
[31:16] : MAG_RAW_Z -- Magnetometer Z (2s complement 16-bit integer)
:return: MAG_RAW_Z as int16_t;
"""
addr = 0x5D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_RAW_Z')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
mag_raw_z, = struct.unpack('>hxx', payload[0:4])
return reg, mag_raw_z,
@property
def dreg_mag_raw_time(self):
"""
Contains time at which the last magnetometer data from the magnetometer was acquired.
Payload structure:
[31:0] : MAG_RAW_TIME -- 32-bit IEEE Floating Point Value
:return: MAG_RAW_TIME as float;
"""
addr = 0x5E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_raw_time, = struct.unpack('>f', payload[0:4])
return reg, mag_raw_time,
@property
def dreg_temperature(self):
"""
Contains the temperature output of the onboard temperature sensor.
Payload structure:
[31:0] : TEMPERATURE -- Temperature in degrees Celcius (32-bit IEEE Floating Point)
:return: TEMPERATURE as float;
"""
addr = 0x5F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_TEMPERATURE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
temperature, = struct.unpack('>f', payload[0:4])
return reg, temperature,
@property
def dreg_temperature_time(self):
"""
Contains time at which the last temperature was acquired.
Payload structure:
[31:0] : TEMPERATURE_TIME -- 32-bit IEEE Floating Point Value
:return: TEMPERATURE_TIME as float;
"""
addr = 0x60
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_TEMPERATURE_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
temperature_time, = struct.unpack('>f', payload[0:4])
return reg, temperature_time,
@property
def dreg_gyro_proc_x(self):
"""
Contains the actual measured angular rate from the gyro for the x axis in degrees/sec after calibration has
been applied.
Payload structure:
[31:0] : GYRO_PROC_X -- Gyro X in degrees / sec (32-bit IEEE Floating Point Value)
:return: GYRO_PROC_X as float;
"""
addr = 0x61
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_PROC_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_proc_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_proc_x,
@property
def dreg_gyro_proc_y(self):
"""
Contains the actual measured angular rate from the gyro for the y axis in degrees/sec after calibration has
been applied.
Payload structure:
[31:0] : GYRO_PROC_Y -- Gyro Y in degrees / sec (32-bit IEEE Floating Point Value)
:return: GYRO_PROC_Y as float;
"""
addr = 0x62
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_PROC_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_proc_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_proc_y,
@property
def dreg_gyro_proc_z(self):
"""
Contains the actual measured angular rate from the gyro for the z axis in degrees/sec after calibration has
been applied.
Payload structure:
[31:0] : GYRO_PROC_Z -- Gyro Z in degrees / sec (32-bit IEEE Floating Point Value)
:return: GYRO_PROC_Z as float;
"""
addr = 0x63
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_PROC_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_proc_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_proc_z,
@property
def dreg_gyro_proc_time(self):
"""
Contains the time at which the last rate gyro data from the gyro was measured.
Payload structure:
[31:0] : GYRO_PROC_TIME -- Gyro time stamp (32-bit IEEE Floating Point Value)
:return: GYRO_PROC_TIME as float;
"""
addr = 0x64
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_PROC_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_proc_time, = struct.unpack('>f', payload[0:4])
return reg, gyro_proc_time,
@property
def dreg_accel_proc_x(self):
"""
Contains the actual measured acceleration from the accelerometer for the x axis in m/s2 after calibration has
been applied.
Payload structure:
[31:0] : ACCEL_PROC_X -- Acceleration X in m/s2 (32-bit IEEE Floating Point Value)
:return: ACCEL_PROC_X as float;
"""
addr = 0x65
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_PROC_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_proc_x, = struct.unpack('>f', payload[0:4])
return reg, accel_proc_x,
@property
def dreg_accel_proc_y(self):
"""
Contains the actual measured acceleration from the accelerometer for the y axis in m/s2 after calibration has
been applied.
Payload structure:
[31:0] : ACCEL_PROC_Y -- Acceleration Y in m/s2 (32-bit IEEE Floating Point Value)
:return: ACCEL_PROC_Y as float;
"""
addr = 0x66
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_PROC_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_proc_y, = struct.unpack('>f', payload[0:4])
return reg, accel_proc_y,
@property
def dreg_accel_proc_z(self):
"""
Contains the actual measured acceleration from the accelerometer for the z axis in m/s2 after calibration has
been applied.
Payload structure:
[31:0] : ACCEL_PROC_Z -- Acceleration Z in m/s2 (32-bit IEEE Floating Point Value)
:return: ACCEL_PROC_Z as float;
"""
addr = 0x67
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_PROC_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_proc_z, = struct.unpack('>f', payload[0:4])
return reg, accel_proc_z,
@property
def dreg_accel_proc_time(self):
"""
Contains the time at which the last acceleration data from the accelerometer was measured.
Payload structure:
[31:0] : ACCEL_PROC_TIME -- Accelerometer time stamp (32-bit IEEE Floating Point Value)
:return: ACCEL_PROC_TIME as float;
"""
addr = 0x68
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_PROC_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_proc_time, = struct.unpack('>f', payload[0:4])
return reg, accel_proc_time,
@property
def dreg_mag_proc_x(self):
    """Calibrated magnetometer measurement for the x axis.

    Payload: [31:0] MAG_PROC_X -- 32-bit IEEE floating point value.
    :return: (register object, MAG_PROC_X as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x69)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_MAG_PROC_X')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_mag_proc_y(self):
    """Calibrated magnetometer measurement for the y axis.

    Payload: [31:0] MAG_PROC_Y -- 32-bit IEEE floating point value.
    :return: (register object, MAG_PROC_Y as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x6A)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_MAG_PROC_Y')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_mag_proc_z(self):
    """Calibrated magnetometer measurement for the z axis.

    Payload: [31:0] MAG_PROC_Z -- 32-bit IEEE floating point value.
    :return: (register object, MAG_PROC_Z as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x6B)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_MAG_PROC_Z')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_mag_proc_time(self):
    """Time stamp at which the calibrated magnetometer data was acquired.

    Payload: [31:0] MAG_PROC_TIME -- 32-bit IEEE floating point value.
    :return: (register object, MAG_PROC_TIME as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x6C)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_MAG_PROC_TIME')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_quat_ab(self):
    """First two components (a, b) of the estimated quaternion attitude.

    Payload: [31:16] QUAT_A, [15:0] QUAT_B -- int16 each; divide by 29789.09091 for the actual value.
    :return: (register object, QUAT_A as int16_t, QUAT_B as int16_t) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x6D)
    if not ok:
        return None
    raw, = struct.unpack('>I', payload[0:4])
    a, b = struct.unpack('>hh', payload[0:4])
    reg = self.svd_parser.find_register_by(name='DREG_QUAT_AB')
    reg.raw_value = raw
    return reg, a, b
@property
def dreg_quat_cd(self):
    """Second two components (c, d) of the estimated quaternion attitude.

    Payload: [31:16] QUAT_C, [15:0] QUAT_D -- int16 each; divide by 29789.09091 for the actual value.
    :return: (register object, QUAT_C as int16_t, QUAT_D as int16_t) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x6E)
    if not ok:
        return None
    raw, = struct.unpack('>I', payload[0:4])
    c, d = struct.unpack('>hh', payload[0:4])
    reg = self.svd_parser.find_register_by(name='DREG_QUAT_CD')
    reg.raw_value = raw
    return reg, c, d
@property
def dreg_quat_time(self):
    """Time at which the quaternion attitude was estimated.

    Payload: [31:0] QUAT_TIME -- 32-bit IEEE floating point value.
    :return: (register object, QUAT_TIME as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x6F)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_QUAT_TIME')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_euler_phi_theta(self):
    """Roll (phi) and pitch (theta) angle estimates.

    Payload: [31:16] PHI, [15:0] THETA -- int16 each; divide by 91.02222 for degrees.
    :return: (register object, PHI as int16_t, THETA as int16_t) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x70)
    if not ok:
        return None
    raw, = struct.unpack('>I', payload[0:4])
    phi, theta = struct.unpack('>hh', payload[0:4])
    reg = self.svd_parser.find_register_by(name='DREG_EULER_PHI_THETA')
    reg.raw_value = raw
    return reg, phi, theta
@property
def dreg_euler_psi(self):
    """Yaw (psi) angle estimate.

    Payload: [31:16] PSI -- int16; divide by 91.02222 for degrees. Low 16 bits unused.
    :return: (register object, PSI as int16_t) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x71)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_EULER_PSI')
    # raw_value mirrors the signed 16-bit field here (same '>hxx' decode as the value)
    value, = struct.unpack('>hxx', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_euler_phi_theta_dot(self):
    """Roll (phi) and pitch (theta) rate estimates.

    Payload: [31:16] PHI_DOT, [15:0] THETA_DOT -- int16 each; divide by 16.0 for the actual rate.
    :return: (register object, PHI_DOT as int16_t, THETA_DOT as int16_t) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x72)
    if not ok:
        return None
    raw, = struct.unpack('>I', payload[0:4])
    phi_rate, theta_rate = struct.unpack('>hh', payload[0:4])
    reg = self.svd_parser.find_register_by(name='DREG_EULER_PHI_THETA_DOT')
    reg.raw_value = raw
    return reg, phi_rate, theta_rate
@property
def dreg_euler_psi_dot(self):
    """Yaw (psi) rate estimate.

    Payload: [31:16] PSI_DOT -- int16; divide by 16.0 for the actual rate. Low 16 bits unused.
    :return: (register object, PSI_DOT as int16_t) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x73)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_EULER_PSI_DOT')
    # raw_value mirrors the signed 16-bit field here (same '>hxx' decode as the value)
    value, = struct.unpack('>hxx', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_euler_time(self):
    """Time that the Euler angles were estimated.

    Payload: [31:0] EULER_TIME -- 32-bit IEEE floating point value.
    :return: (register object, EULER_TIME as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x74)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_EULER_TIME')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_position_north(self):
    """Measured north position in meters relative to CREG_HOME_NORTH.

    Payload: [31:0] POSITION_NORTH -- 32-bit IEEE floating point value.
    :return: (register object, POSITION_NORTH as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x75)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_POSITION_NORTH')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_position_east(self):
    """Measured east position in meters relative to CREG_HOME_EAST.

    Payload: [31:0] POSITION_EAST -- 32-bit IEEE floating point value.
    :return: (register object, POSITION_EAST as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x76)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_POSITION_EAST')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_position_up(self):
    """Measured altitude in meters relative to CREG_HOME_UP.

    Payload: [31:0] POSITION_UP -- 32-bit IEEE floating point value.
    :return: (register object, POSITION_UP as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x77)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_POSITION_UP')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_position_time(self):
    """Time at which the position was acquired.

    Payload: [31:0] POSITION_TIME -- 32-bit IEEE floating point value.
    :return: (register object, POSITION_TIME as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x78)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_POSITION_TIME')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_velocity_north(self):
    """Measured north velocity in m/s.

    Payload: [31:0] VELOCITY_NORTH -- 32-bit IEEE floating point value.
    :return: (register object, VELOCITY_NORTH as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x79)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_NORTH')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_velocity_east(self):
    """Measured east velocity in m/s.

    Payload: [31:0] VELOCITY_EAST -- 32-bit IEEE floating point value.
    :return: (register object, VELOCITY_EAST as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x7A)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_EAST')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_velocity_up(self):
    """Measured altitude (vertical) velocity in m/s.

    Payload: [31:0] VELOCITY_UP -- 32-bit IEEE floating point value.
    :return: (register object, VELOCITY_UP as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x7B)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_UP')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_velocity_time(self):
    """Time at which the velocity was measured.

    Payload: [31:0] VELOCITY_TIME -- 32-bit IEEE floating point value.
    :return: (register object, VELOCITY_TIME as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x7C)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_TIME')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_gps_latitude(self):
    """GPS-reported latitude in degrees.

    Payload: [31:0] GPS_LATITUDE -- 32-bit IEEE floating point value.
    :return: (register object, GPS_LATITUDE as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x7D)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_GPS_LATITUDE')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_gps_longitude(self):
    """GPS-reported longitude in degrees.

    Payload: [31:0] GPS_LONGITUDE -- 32-bit IEEE floating point value.
    :return: (register object, GPS_LONGITUDE as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x7E)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_GPS_LONGITUDE')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_gps_altitude(self):
    """GPS-reported altitude in meters.

    Payload: [31:0] GPS_ALTITUDE -- 32-bit IEEE floating point value.
    :return: (register object, GPS_ALTITUDE as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x7F)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_GPS_ALTITUDE')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_gps_course(self):
    """GPS-reported course in degrees.

    Payload: [31:0] GPS_COURSE -- 32-bit IEEE floating point value.
    :return: (register object, GPS_COURSE as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x80)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_GPS_COURSE')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_gps_speed(self):
    """GPS-reported speed in m/s.

    Payload: [31:0] GPS_SPEED -- 32-bit IEEE floating point value.
    :return: (register object, GPS_SPEED as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x81)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_GPS_SPEED')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_gps_time(self):
    """GPS-reported time in seconds from the last epoch.

    Payload: [31:0] GPS_TIME -- 32-bit IEEE floating point value.
    (The original docstring mislabeled this field as "GPS Speed".)
    :return: (register object, GPS_TIME as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x82)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_GPS_TIME')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_gps_sat_1_2(self):
    """Satellite ID and SNR for satellites 1 and 2.

    Payload: [31:24] SAT_1_ID, [23:16] SAT_1_SNR, [15:8] SAT_2_ID, [7:0] SAT_2_SNR -- uint8 each.
    :return: (register object, SAT_1_ID, SAT_1_SNR, SAT_2_ID, SAT_2_SNR) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x83)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_GPS_SAT_1_2')
    reg.raw_value, = struct.unpack('>I', payload[0:4])
    sat_info = struct.unpack('>BBBB', payload[0:4])
    return (reg,) + sat_info
@property
def dreg_gps_sat_3_4(self):
    """Satellite ID and SNR for satellites 3 and 4.

    Payload: [31:24] SAT_3_ID, [23:16] SAT_3_SNR, [15:8] SAT_4_ID, [7:0] SAT_4_SNR -- uint8 each.
    :return: (register object, SAT_3_ID, SAT_3_SNR, SAT_4_ID, SAT_4_SNR) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x84)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_GPS_SAT_3_4')
    reg.raw_value, = struct.unpack('>I', payload[0:4])
    sat_info = struct.unpack('>BBBB', payload[0:4])
    return (reg,) + sat_info
@property
def dreg_gps_sat_5_6(self):
    """Satellite ID and SNR for satellites 5 and 6.

    Payload: [31:24] SAT_5_ID, [23:16] SAT_5_SNR, [15:8] SAT_6_ID, [7:0] SAT_6_SNR -- uint8 each.
    :return: (register object, SAT_5_ID, SAT_5_SNR, SAT_6_ID, SAT_6_SNR) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x85)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_GPS_SAT_5_6')
    reg.raw_value, = struct.unpack('>I', payload[0:4])
    sat_info = struct.unpack('>BBBB', payload[0:4])
    return (reg,) + sat_info
@property
def dreg_gps_sat_7_8(self):
    """Satellite ID and SNR for satellites 7 and 8.

    Payload: [31:24] SAT_7_ID, [23:16] SAT_7_SNR, [15:8] SAT_8_ID, [7:0] SAT_8_SNR -- uint8 each.
    :return: (register object, SAT_7_ID, SAT_7_SNR, SAT_8_ID, SAT_8_SNR) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x86)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_GPS_SAT_7_8')
    reg.raw_value, = struct.unpack('>I', payload[0:4])
    sat_info = struct.unpack('>BBBB', payload[0:4])
    return (reg,) + sat_info
@property
def dreg_gps_sat_9_10(self):
    """Satellite ID and SNR for satellites 9 and 10.

    Payload: [31:24] SAT_9_ID, [23:16] SAT_9_SNR, [15:8] SAT_10_ID, [7:0] SAT_10_SNR -- uint8 each.
    :return: (register object, SAT_9_ID, SAT_9_SNR, SAT_10_ID, SAT_10_SNR) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x87)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_GPS_SAT_9_10')
    reg.raw_value, = struct.unpack('>I', payload[0:4])
    sat_info = struct.unpack('>BBBB', payload[0:4])
    return (reg,) + sat_info
@property
def dreg_gps_sat_11_12(self):
    """Satellite ID and SNR for satellites 11 and 12.

    Payload: [31:24] SAT_11_ID, [23:16] SAT_11_SNR, [15:8] SAT_12_ID, [7:0] SAT_12_SNR -- uint8 each.
    :return: (register object, SAT_11_ID, SAT_11_SNR, SAT_12_ID, SAT_12_SNR) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x88)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_GPS_SAT_11_12')
    reg.raw_value, = struct.unpack('>I', payload[0:4])
    sat_info = struct.unpack('>BBBB', payload[0:4])
    return (reg,) + sat_info
@property
def dreg_gyro_bias_x(self):
    """Estimated x-axis gyro bias in degrees/s.

    Payload: [31:0] GYRO_BIAS_X -- 32-bit IEEE floating point value.
    :return: (register object, GYRO_BIAS_X as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x89)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_GYRO_BIAS_X')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_gyro_bias_y(self):
    """Estimated y-axis gyro bias in degrees/s.

    Payload: [31:0] GYRO_BIAS_Y -- 32-bit IEEE floating point value.
    :return: (register object, GYRO_BIAS_Y as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x8A)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_GYRO_BIAS_Y')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def dreg_gyro_bias_z(self):
    """Estimated z-axis gyro bias in degrees/s.

    Payload: [31:0] GYRO_BIAS_Z -- 32-bit IEEE floating point value.
    :return: (register object, GYRO_BIAS_Z as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x8B)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='DREG_GYRO_BIAS_Z')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,
@property
def get_fw_revision(self):
    """Firmware build identification: four ASCII characters identifying a firmware series.

    Payload: [31:0] FW_REVISION -- four-byte ASCII string.
    :return: FW_REVISION as string on success, None if the read failed.
    """
    ok, payload = self.read_register(0xAA)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='GET_FW_REVISION')
    reg.raw_value, = struct.unpack('>I', payload[0:4])
    return struct.unpack('>4s', payload[0:4])[0].decode('utf-8')
@property
def flash_commit(self):
    """Write-only command register: reading always raises RuntimeError."""
    raise RuntimeError('flash_commit has no getter! The register flash_commit is write-only!')

@flash_commit.setter
def flash_commit(self, new_value):
    # Writing to command register 0xAB commits settings to flash.
    self.write_register(0xAB, new_value)
@property
def reset_to_factory(self):
    """Write-only command register: reading always raises RuntimeError."""
    raise RuntimeError('reset_to_factory has no getter! The register reset_to_factory is write-only!')

@reset_to_factory.setter
def reset_to_factory(self, new_value):
    # Writing to command register 0xAC restores factory defaults.
    self.write_register(0xAC, new_value)
@property
def zero_gyros(self):
    """Write-only command register: reading always raises RuntimeError."""
    raise RuntimeError('zero_gyros has no getter! The register zero_gyros is write-only!')

@zero_gyros.setter
def zero_gyros(self, new_value):
    # Writing to command register 0xAD triggers gyro zeroing.
    self.write_register(0xAD, new_value)
@property
def set_home_position(self):
    """Write-only command register: reading always raises RuntimeError."""
    raise RuntimeError('set_home_position has no getter! The register set_home_position is write-only!')

@set_home_position.setter
def set_home_position(self, new_value):
    # Writing to command register 0xAE latches the current position as home.
    self.write_register(0xAE, new_value)
@property
def set_mag_reference(self):
    """Write-only command register: reading always raises RuntimeError."""
    raise RuntimeError('set_mag_reference has no getter! The register set_mag_reference is write-only!')

@set_mag_reference.setter
def set_mag_reference(self, new_value):
    # Writing to command register 0xB0 latches the magnetometer reference.
    self.write_register(0xB0, new_value)
@property
def calibrate_accelerometers(self):
    """Write-only command register: reading always raises RuntimeError."""
    raise RuntimeError('calibrate_accelerometers has no getter! The register calibrate_accelerometers is write-only!')

@calibrate_accelerometers.setter
def calibrate_accelerometers(self, new_value):
    # Writing to command register 0xB1 starts accelerometer calibration.
    self.write_register(0xB1, new_value)
@property
def reset_ekf(self):
    """Write-only command register: reading always raises RuntimeError."""
    raise RuntimeError('reset_ekf has no getter! The register reset_ekf is write-only!')

@reset_ekf.setter
def reset_ekf(self, new_value):
    # Writing to command register 0xB3 resets the Extended Kalman Filter.
    self.write_register(0xB3, new_value)
@property
def build_id(self):
    """BUILD version of the programmed firmware.

    Payload: [31:24] VERSION_MAJOR (uint8), [23:16] VERSION_MINOR (uint8), [15:0] BUILD_ID (uint16).
    :return: (register object, VERSION_MAJOR, VERSION_MINOR, BUILD_ID) on success, None if the read failed.
    """
    ok, payload = self.read_register(0xB5)
    if not ok:
        return None
    reg = self.svd_parser.find_register_by(name='BUILD_ID')
    reg.raw_value, = struct.unpack('>I', payload[0:4])
    major, minor, build = struct.unpack('>BBH', payload[0:4])
    return reg, major, minor, build
@property
def hidden_gyro_variance(self):
    """Gyro variance used by the EKF (hidden register 0x00).

    Payload: [31:0] GYRO_VARIANCE -- 32-bit IEEE floating point value.
    :return: (hidden register object, GYRO_VARIANCE as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x00, hidden=True)
    if not ok:
        return None
    reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_VARIANCE')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,

@hidden_gyro_variance.setter
def hidden_gyro_variance(self, new_value):
    # Hidden registers live in a separate map; pass hidden=True on write.
    self.write_register(0x00, new_value, hidden=True)
@property
def hidden_accel_variance(self):
    """Accelerometer variance used by the EKF (hidden register 0x01).

    Payload: [31:0] ACCEL_VARIANCE -- 32-bit IEEE floating point value.
    :return: (hidden register object, ACCEL_VARIANCE as float) on success, None if the read failed.
    """
    ok, payload = self.read_register(0x01, hidden=True)
    if not ok:
        return None
    reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_VARIANCE')
    value, = struct.unpack('>f', payload[0:4])
    reg.raw_value = value
    return reg, value,

@hidden_accel_variance.setter
def hidden_accel_variance(self, new_value):
    # Hidden registers live in a separate map; pass hidden=True on write.
    self.write_register(0x01, new_value, hidden=True)
if __name__ == '__main__':
pass | /rsl_comm_py-0.1.11.tar.gz/rsl_comm_py-0.1.11/rsl_comm_py/um7_registers.py | 0.891233 | 0.326218 | um7_registers.py | pypi |
# Author: Dr. Konstantin Selyunin
# License: MIT
from dataclasses import dataclass
from pathlib import Path
from typing import Any, List, Tuple, Union
from xml.etree import ElementTree as ET
@dataclass
class EnumeratedValue:
    """One named value of an enumerated register field, as parsed from the SVD file."""
    name: str         # symbolic name of the enum entry
    description: str  # human-readable description
    value: int        # numeric value of the entry

    def __repr__(self):
        # Compact repr deliberately omits the description.
        return "EnumeratedValue(name={} -> value={})".format(self.name, self.value)
@dataclass
class Field:
    """A bit field within a register, as parsed from the SVD description."""
    name: str
    description: str
    bit_range: Tuple[int, int]  # (msb, lsb), both inclusive
    data_type: str              # SVD dataType string, see get_c_type() for the known set
    access: str
    enumerated_values: Tuple[EnumeratedValue] = tuple()

    def __repr__(self):
        return f"Field(name={self.name}, "\
               f"bit_range={self.bit_range}, data_type={self.data_type}, "\
               f"access={self.access}, enumerated_values={self.enumerated_values})"

    def find_enum_entry_by(self, **kw) -> EnumeratedValue:
        """Look up an enum entry by exactly one of ``name`` or ``value``.

        If the field has no enumerated values, the queried value is returned
        unchanged; if no entry matches, None is returned.

        :raises NotImplementedError: unless exactly one of `name`/`value` is given.
        """
        # Validate BEFORE unpacking kw.items(): the original unpacked first, so
        # zero or multiple keyword arguments raised ValueError instead of the
        # intended NotImplementedError below.
        if len(kw) != 1 or next(iter(kw)) not in ['name', 'value']:
            raise NotImplementedError(f"One pair is supported, with key either `name` or `value`, but given: {kw}!")
        (prop, value), = kw.items()
        if self.enumerated_values is None or len(self.enumerated_values) == 0:
            return value
        found_enum = next(filter(lambda x: getattr(x, prop) == value, self.enumerated_values), None)
        return found_enum

    def get_c_type(self) -> str:
        """Map the SVD dataType string to the corresponding C type name.

        :raises KeyError: for an unknown data_type.
        """
        c_type_mapping = {
            'bitField' : 'uint32_t',
            'uint8_t'  : 'uint8_t',
            'int8_t'   : 'int8_t',
            'uint16_t' : 'uint16_t',
            'int16_t'  : 'int16_t',
            'uint32_t' : 'uint32_t',
            'int32_t'  : 'int32_t',
            'uint64_t' : 'uint64_t',
            'int64_t'  : 'int64_t',
            'float'    : 'float',
            'double'   : 'double',
            'string'   : 'char'
        }
        return c_type_mapping[self.data_type]
@dataclass
class Register:
    """A device register parsed from the SVD file: metadata, bit fields, and a cached raw value."""
    name: str
    description: str
    access: str
    address: int
    fields: List[Field]
    raw_value: int = 0  # last value read from / to be written to the device

    def __repr__(self):
        return f"Register(name={self.name}, address={self.address}, access={self.access}, fields={self.fields})"

    def find_field_by(self, name: str = '', bit_position: int = -1) -> Union[Field, None]:
        """Find a field by exact name, or by a bit position lying in its bit range.

        Returns None when neither selector is given or no field matches.
        """
        if name != '':
            return next(filter(lambda x: x.name == name, self.fields), None)
        elif bit_position != -1:
            # bit_range is (msb, lsb); range() therefore runs lsb..msb inclusive
            return next(
                filter(lambda x: bit_position in set(range(x.bit_range[1], x.bit_range[0] + 1)), self.fields), None)
        else:
            return None

    def get_fields_and_gaps(self) -> List:
        """Return ``[{field_name_or_None: bit_width}, ...]`` covering bits 0..31, LSB first.

        Unused bit runs appear as entries keyed by None.
        """
        register_bits = range(0, 32)
        field_in_bit_position = []
        for bit in register_bits:
            found_field = self.find_field_by(bit_position=bit)
            if found_field is None:
                # placeholder marking an unused bit; only its None name is used below
                field_in_bit_position.append(Field(name=None, description='', bit_range=(0,), data_type='', access=''))
            else:
                field_in_bit_position.append(found_field)
        # run-length encode consecutive bits that belong to the same field name
        fields_and_gaps = []
        for el in field_in_bit_position:
            if len(fields_and_gaps) == 0 or (el.name not in fields_and_gaps[-1].keys()):
                fields_and_gaps.append({el.name: 1})
            elif el.name in fields_and_gaps[-1].keys():
                fields_and_gaps[-1][el.name] += 1
        return fields_and_gaps

    @property
    def field_names(self):
        """Names of all fields in SVD declaration order."""
        return [el.name for el in self.fields]

    def as_tuple(self) -> Tuple[EnumeratedValue]:
        """Current value of every field, each wrapped as an EnumeratedValue."""
        return tuple(self.field_enum(el) for el in self.field_names)

    def field_enum(self, name: str = '') -> EnumeratedValue:
        """Current value of field ``name`` as an EnumeratedValue.

        Falls back to a nameless EnumeratedValue when the field has no matching
        enum entry (or no enumerated values at all).
        """
        field = self.find_field_by(name=name)
        field_value = self.field_value(name)
        enum_entry = field.find_enum_entry_by(value=field_value)
        if not isinstance(enum_entry, EnumeratedValue):
            enum_entry = EnumeratedValue(name='', value=field_value, description='')
        return enum_entry

    def from_tuple(self, fields: Tuple[EnumeratedValue]):
        """Not implemented: assigning register state from a tuple of field values."""
        print(fields)
        raise NotImplementedError("Assigning from tuple is not implemented yet!")

    def as_dict(self):
        """Serialize the register -- including per-field current values -- to plain dicts."""
        object_as_dict = {}
        object_fields = vars(self).keys()
        for object_field in object_fields:
            if object_field == 'fields':
                object_as_dict['fields'] = []
                for register_field in vars(self)['fields']:
                    field_dict = vars(register_field).copy()
                    # replace the (possibly verbose) enum list with the current value only
                    field_dict.pop('enumerated_values')
                    enum_value = self.field_enum(register_field.name)
                    field_dict['value'] = vars(enum_value)
                    object_as_dict['fields'].append(field_dict)
            else:
                object_as_dict[object_field] = vars(self)[object_field]
        return object_as_dict

    def set_bits_for_field(self, field: Field) -> int:
        """Bit mask with ones over ``field``'s [msb:lsb] range."""
        msb, lsb = field.bit_range
        return Register.set_bits_for_range(msb, lsb)

    @staticmethod
    def set_bits_for_range(msb: int, lsb: int) -> int:
        """Mask with bits lsb..msb (inclusive) set, e.g. (7, 4) -> 0xF0."""
        return ((1 << lsb) - 1) ^ ((1 << (msb + 1)) - 1)

    def field_value(self, name: str = '') -> int:
        """Extract field ``name`` from raw_value.

        Float fields return raw_value unmasked (the raw value already holds the
        decoded float for such registers).

        :raises NotImplementedError: when no field called ``name`` exists.
        """
        field = self.find_field_by(name=name)
        if field is None:
            raise NotImplementedError(f"You provided field '{name}' for register {self.name}. "
                                      f"Check the data sheet and provide correct name!")
        if field.data_type == 'float':
            field_value = self.raw_value
        else:
            bit_mask = self.set_bits_for_field(field)
            field_value = self.raw_value & bit_mask
            field_value = field_value >> field.bit_range[1]
        return field_value

    def set_field_value(self, **kw):
        """Set exactly one field inside raw_value, given as ``set_field_value(FIELD_NAME=value)``."""
        # Validate BEFORE unpacking kw.items(): the original unpacked first, so
        # zero or multiple keyword arguments raised ValueError instead of the
        # intended NotImplementedError below.
        if len(kw) != 1:
            raise NotImplementedError(f"Only setting 1 property at a time is supported, but got: {kw}!")
        (prop, value), = kw.items()
        field = self.find_field_by(name=prop)
        msb, lsb = field.bit_range
        # clear the target bit span, then OR in the (masked) new value
        zero_mask = ~(((1 << (msb - lsb + 1)) - 1) << lsb)
        self.raw_value &= zero_mask
        bit_mask = (1 << (msb - lsb + 1)) - 1
        self.raw_value |= (bit_mask & value) << lsb
class RslSvdParser:
def __init__(self, *args, **kwargs):
script_folder = Path(__file__).parent
self.svd_xml_file = script_folder / 'shearwater.svd' if not kwargs.get('svd_file') else kwargs.get('svd_file')
self.svd_xml_root = RslSvdParser.parse_svd_file(self.svd_xml_file)
self.svd_regs = RslSvdParser.find_main_register_xml_root_in_svd(self.svd_xml_root)
self.hidden_regs_xml = RslSvdParser.find_hidden_register_xml_root_in_svd(self.svd_xml_root)
self.svd_cregs = self.find_cregs_in_svd()
self.svd_dregs = self.get_dregs_from_svd()
self.svd_commands = self.get_commands_from_svd()
self.cregs = self.get_cregs_objects()
self.dregs = self.get_dreg_objects()
self.commands = self.get_commands_objects()
self.hidden_regs = self.get_hidden_objects()
self.regs = self.cregs + self.dregs + self.commands
@staticmethod
def parse_svd_file(file_to_parse: Union[str, Path]):
if isinstance(file_to_parse, str):
file_to_parse = Path(file_to_parse)
if not file_to_parse.exists():
raise FileNotFoundError(f"Non-existing SVD file provided, check if ``{file_to_parse}`` exists!")
return ET.parse(file_to_parse).getroot()
@staticmethod
def find_main_register_xml_root_in_svd(parsed_xml_tree_root: ET.Element) -> List[ET.Element]:
main_register_map_peripheral = parsed_xml_tree_root.find('.//peripheral/[name="MAIN_REGISTER_MAP"]')
return main_register_map_peripheral.findall('.//register')
@staticmethod
def find_hidden_register_xml_root_in_svd(parsed_xml_tree_root: ET.Element) -> List[ET.Element]:
hidden_register_map_peripheral = parsed_xml_tree_root.find('.//peripheral/[name="HIDDEN_REGISTER_MAP"]')
return hidden_register_map_peripheral.findall('.//register')
def find_cregs_in_svd(self) -> Tuple[Any, ...]:
return tuple(el for el in self.svd_regs if 'CREG' in el.find('./name').text)
def get_dregs_from_svd(self) -> Tuple[Any, ...]:
return tuple(el for el in self.svd_regs if 'DREG' in el.find('./name').text)
def get_commands_from_svd(self) -> Tuple[Any, ...]:
return tuple(el for el in self.svd_regs if int(el.find('./address').text, 16) >= 0xAA)
def get_cregs_objects(self) -> Tuple[Register]:
return tuple(self.extract_register_fields(el) for el in self.svd_cregs)
def get_dreg_objects(self) -> Tuple[Register]:
return tuple(self.extract_register_fields(el) for el in self.svd_dregs)
def get_commands_objects(self) -> Tuple[Register]:
return tuple(self.extract_register_fields(el) for el in self.svd_commands)
def get_hidden_objects(self) -> Tuple[Register]:
return tuple(self.extract_register_fields(el) for el in self.hidden_regs_xml)
@staticmethod
def find_by(registers: Tuple[Register], **kw) -> Union[None, Register]:
(prop, value), = kw.items()
if len(kw) > 1 or prop not in ['name', 'address']:
raise NotImplementedError(f"One pair is supported, with key either `name` or `address`, but given: {kw}!")
found_register = next(filter(lambda x: getattr(x, prop) == value, registers), None)
return found_register
def find_register_by(self, **kw) -> Union[None, Register]:
return RslSvdParser.find_by(self.regs, **kw)
def find_hidden_register_by(self, **kw):
return RslSvdParser.find_by(self.hidden_regs, **kw)
@staticmethod
def get_enumerated_value(enum_value: ET.Element) -> EnumeratedValue:
name = enum_value.find('.//name').text
description = enum_value.find('.//description').text
value = int(enum_value.find('.//value').text)
return EnumeratedValue(name=name, description=description, value=value)
def get_enumerated_values(self, enum_values: ET.Element) -> Tuple[EnumeratedValue]:
    """Convert the children of an <enumeratedValues> element into EnumeratedValue objects.

    Implicitly returns None when the element is missing or has no children.
    NOTE(review): relies on element truthiness (has-children); this usage is
    deprecated for stdlib ElementTree -- confirm which etree library is used.
    """
    if enum_values:
        return tuple(self.get_enumerated_value(child) for child in enum_values)
def extract_field_info(self, field: ET.Element) -> Field:
    """Build a Field object from a <field> element of the SVD file."""
    name = field.find('.//name').text
    description = field.find('.//description').text
    bit_range_str = field.find('.//bitRange').text
    # '[hi:lo]' -> (hi, lo) as integers.
    bit_range = tuple(int(el) for el in bit_range_str.strip('[]').split(':'))
    access = field.find('.//access').text
    data_type = field.find('.//dataType').text
    # <enumeratedValues> is optional; the helper returns None when absent/empty.
    enumerated_values = self.get_enumerated_values(field.find('.//enumeratedValues'))
    return Field(name=name,
                 description=description,
                 bit_range=bit_range,
                 data_type=data_type,
                 access=access,
                 enumerated_values=enumerated_values)
def extract_register_fields(self, reg_desc: ET.Element) -> Register:
    """Build a Register object (including its Fields) from a <register> element."""
    reg_name = reg_desc.find('.//name').text
    reg_access = reg_desc.find('.//access').text
    description = reg_desc.find('.//description').text
    # Addresses are stored as hex strings in the SVD file.
    address = int(reg_desc.find('.//address').text, 16)
    fields = reg_desc.findall('.//field')
    field_info = [self.extract_field_info(field) for field in fields]
    return Register(name=reg_name,
                    access=reg_access,
                    description=description,
                    address=address,
                    fields=field_info)
# Module entry point: intentionally a no-op.
if __name__ == '__main__':
    pass
from urlparse import urlunsplit, urlsplit
from os.path import basename
import cgi
from urllib import urlencode
from zope.interface import implements
from rsl.globalregistry import lookupimpl
from rsl.interfaces import IServiceDescription, ITransport, IProxy
from rsl.implementations import OperationInfo
def loadHTTPGET(url):
    """
    returns a Description object for a ReST-ful Web Service.
    The url is used as an example request; the operation name and parameter
    names are extracted from this url.
    @param url: The example url.
    @type url: C{string}
    @return: Returns a Description object from the given url.
    @rtype: L{RESTDescr}
    """
    rsd = RESTDescr()
    rsd.fromURL(url)
    return rsd
class RESTDescr(object):
    '''
    The IServiceDescription implementation for ReST-ful Web Services.
    This class also implements the IProxy interface.

    NOTE: Python 2 code (zope ``implements``, ``cgi.parse_qs``).
    '''

    implements(IServiceDescription, IProxy)

    descname = 'httpurl'  # description-type identifier
    ptype = 'httpget'     # proxy/transport type identifier

    def __init__(self, location=None):
        '''
        create an instance and empty all instance variables.
        '''
        self.url = location
        self.callables = {}  # maps operation name -> Callable

    def fromURL(self, url, **kws):
        '''
        initialise this instance. parse the url and try to identify
        methodname and parameter names.

        The last path component becomes the operation name; the query
        string keys become its input parameter names.
        '''
        self.url = url
        pdu = urlsplit(self.url)
        # positional args: keep_blank_values=True, strict_parsing=False
        params = cgi.parse_qs(pdu.query, True, False)
        opinfo = OperationInfo()
        opinfo.name = basename(pdu.path)
        # location is the url without query string and fragment
        opinfo.location = urlunsplit((pdu[0], pdu[1], pdu[2], '', ''))
        opinfo.input = params.keys()
        self.addOperation(opinfo.name, opinfo)

    def getProxy(self, **_):
        '''
        return self as IProxy implementation.
        '''
        return self

    def getServices(self):
        '''
        return all service names described by this IServiceDescription.
        '''
        return [{'name': self.url}]

    def callRemote(self, name, *args, **kws):
        '''
        invoke a remote method offered by this IProxy and return its result.
        '''
        # BUGFIX: the invocation result was previously discarded, so
        # callers of callRemote always received None.
        return self.__getattr__(name)(*args, **kws)

    def addOperation(self, name, operationinfo):
        '''
        add the signature of a remote method.
        '''
        self.callables[name] = Callable(operationinfo.location,
                                        name, operationinfo.input)

    def __getattr__(self, name):
        '''
        make remote methods accessible as normal python method.

        Implicitly returns None for unknown operation names, so calling an
        unknown operation raises TypeError rather than AttributeError.
        '''
        cbl = self.callables.get(name, None)
        if cbl is not None:
            return cbl
class Callable(object):
    '''
    Callable instances are used by ReST proxy instances to provide python
    methods which actually execute the remote method.
    '''

    def __init__(self, url, name, params):
        '''
        initialise this callable with all necessary information.

        @param url: base url of the service (query string is rebuilt per call)
        @param name: the operation name
        @param params: the known parameter names
        '''
        pdu = urlsplit(url)
        # (scheme, netloc, path) of the target url
        self.purl = (pdu[0], pdu[1], pdu[2])
        self.name = name
        self.paramnames = params

    def __call__(self, **kwargs):
        '''
        invoke the remote method.

        All keyword arguments are urlencoded into the query string; the
        transport implementation is looked up by url scheme.
        '''
        target = urlunsplit((self.purl[0], self.purl[1], self.purl[2],
                             urlencode(kwargs),''))
        transport = lookupimpl(ITransport, self.purl[0])
        headers, data = transport.send(target, None)
        # NOTE(review): debugging leftover -- prints the content type of
        # every response (Python 2 print statement).
        print headers['content-type']
        return data
from pkg_resources import resource_stream
from lxml import etree
from zope.interface import classProvides, directlyProvides
from rsl.interfaces import ISchemaFactory
from rsl.misc.namespace import clark, qname2clark, url2ns
from rsl.xsd.deserialtypes import List, Dict
from rsl.xsd.urtype import AnySimpleType, AnyType
from rsl.xsd.component import XSElement, XSAny, XSSimpleType
from rsl.xsd.namespace import NS_XMLSCHEMA_INSTANCE, NS_XMLSCHEMA
from rsl.xsd.interfaces import IXMLSerializer, IXMLDeserializer
from rsl.xsd.schema import XMLSchema
from rsl.soap11.namespace import NS_SOAPENC
def createsoapencschema(schemaparent):
    '''
    factory for bundled soapenc schema.

    Loads the packaged ``xsds/soapenc.xsd`` resource and parses it into an
    XMLSchema attached to *schemaparent*.
    '''
    xmlschema = resource_stream(__name__, 'xsds/soapenc.xsd')
    tree = etree.parse(xmlschema)
    schema = XMLSchema(schemaparent)
    schema.frometree(tree.getroot())
    #arrayType = tree.find('{%s}attribute[@name="arrayType"]' % (NS_XMLSCHEMA))
    #schema.attributes['arrayType'] = SoapEncArrayType(arrayType, schema)
    return schema
# mark the factory as providing ISchemaFactory
directlyProvides(createsoapencschema, ISchemaFactory)
def createsoapenvschema(schemaparent):
    '''
    factory for bundled soapenv schema.

    Loads the packaged ``xsds/soapenv.xsd`` resource and parses it into an
    XMLSchema attached to *schemaparent*.
    '''
    xmlschema = resource_stream(__name__, 'xsds/soapenv.xsd')
    tree = etree.parse(xmlschema)
    schema = XMLSchema(schemaparent)
    schema.frometree(tree.getroot())
    return schema
# mark the factory as providing ISchemaFactory
directlyProvides(createsoapenvschema, ISchemaFactory)
def createsoapschema(schemaparent):
    '''
    factory for bundled soap-wsdl extension schema.

    Loads the packaged ``xsds/wsdlsoap.xsd`` resource and parses it into an
    XMLSchema attached to *schemaparent*.
    '''
    xmlschema = resource_stream(__name__, 'xsds/wsdlsoap.xsd')
    tree = etree.parse(xmlschema)
    schema = XMLSchema(schemaparent)
    schema.frometree(tree.getroot())
    return schema
# mark the factory as providing ISchemaFactory
directlyProvides(createsoapschema, ISchemaFactory)
class SoapEncElementSerializer(object):
    '''
    Implementation for soap encoded de/serialiser which uses an element
    name as starting point.
    This class just wraps SoapEncSerializer to adapt to IXML(de)Serialiser
    interface.
    '''

    classProvides(IXMLSerializer, IXMLDeserializer)

    @classmethod
    def serialize(cls, params, typename, schema, root):
        '''
        params ... data structure to serialize
        typename ... element name in typesystem
        schema ... ISchema
        root ... serialize to (a child element is created below it; if None,
                 a new standalone element is created)
        return newly created element
        '''
        eltype = schema.getElement(typename)
        if root is None:
            root = etree.Element(eltype.getname())
        else:
            root = etree.SubElement(root, eltype.getname())
        xstype = eltype.gettype()
        # delegate the payload serialisation to the rpc-encoder
        return SoapEncSerializer.serialize(params, xstype, schema, root)

    @classmethod
    def deserialize(cls, root, typename, schema):
        '''
        convert soap encoded message into python data structure.
        '''
        xstype = schema.getElement(typename).gettype()
        return SoapEncSerializer.deserialize(root, xstype, schema)
class SoapEncTypeSerializer(object):
    '''
    Implementation for soap encoded de/serialiser which uses an type
    name as starting point.
    This class just wraps SoapEncSerializer to adapt to IXML(de)Serialiser
    interface.
    '''

    classProvides(IXMLSerializer, IXMLDeserializer)

    @classmethod
    def serialize(cls, params, typename, schema, root):
        '''
        params ... data structure to serialize
        typename ... type name in typesystem
        schema ... ISchema
        root ... serialize to
        return newly created element
        '''
        xstype = schema.getType(typename)
        return SoapEncSerializer.serialize(params, xstype, schema, root)

    @classmethod
    def deserialize(cls, root, typename, xsd):
        '''
        convert soap encoded message into python data structure.
        '''
        xstype = xsd.getType(typename)
        return SoapEncSerializer.deserialize(root, xstype, xsd)
class SoapEncSerializer(object):
    '''
    Implementation for soap encoded de/serialiser.

    NOTE: Python 2 code (``unicode`` in rpc_type_map, bare excepts).
    '''

    # mapping of python data types to predefined xsd and soapenc data types.
    rpc_type_map = {str : (NS_XMLSCHEMA, 'string'),
                    unicode: (NS_XMLSCHEMA, 'string'),
                    int: (NS_XMLSCHEMA, 'integer'),
                    float: (NS_XMLSCHEMA, 'float'),
                    list: (NS_SOAPENC, 'Array'),
                    tuple: (NS_SOAPENC, 'Array'),
                    dict: (NS_SOAPENC, 'Struct'),
                    #datetime: clark(NS_SOAPENC, 'dateTime')
                    }

    @classmethod
    def getxsdtype(cls, data):
        '''
        resolve python data type to xsd data typename.

        Returns a (namespace-url, localname) tuple, or (None, None) for
        unmapped python types.
        '''
        xsdt = type(data)
        if xsdt in cls.rpc_type_map:
            return cls.rpc_type_map[type(data)]
        return (None, None)

    @classmethod
    def addnamespace(cls, nsurl, elem):
        '''
        helper method to add a new namespace definition to the etree
        data structure.

        Temporarily renames the element into the new namespace (which
        registers the prefix) and restores the original tag.
        '''
        oldtag = elem.tag
        elem.tag = clark(nsurl, 'newname')
        elem.tag = oldtag

    @classmethod
    def getqname(cls, nsurl, name, elem):
        '''
        get qualified name for namespaceurl and localname.
        if necessary the required namespace definition is added to the etree
        first.
        '''
        try:
            xns = url2ns(nsurl, elem.nsmap)
        except KeyError:
            # prefix not known yet: register the namespace and retry
            cls.addnamespace(nsurl, elem)
            xns = url2ns(nsurl, elem.nsmap)
        return '%s:%s' % (xns, name)

    @classmethod
    def serialize(cls, params, xstype, types, root):
        '''
        interface adaption to rpc-encoding serialise method.
        '''
        return cls.rpc_serialize(xstype, params, root, types)

    @classmethod
    def deserialize(cls, root, xstype, types):
        '''
        interface adaption to rpc-encoding deserialise method.
        '''
        return cls.rpc_deserialize(xstype, root, types)

    @classmethod
    def rpc_serialize(cls, xstype, data, root, xsd):
        '''
        the actual rpc-encoding serialise method.
        '''
        if isinstance(xstype, (AnySimpleType, XSSimpleType)):
            # simple content: encode into element text, tag with xsi:type
            if data is None:
                root.set(clark(NS_XMLSCHEMA_INSTANCE, 'nil'), 'true')
            else:
                root.text = xstype.encode(data) # set xsi:type="xsi:string"
                nsurl, name = xstype.gettypename()
                qtype = cls.getqname(nsurl, name, root)
                root.set(clark(NS_XMLSCHEMA_INSTANCE, 'type'), qtype)
            return
        if isinstance(xstype, XSElement): #ComplexType):
            if xstype.getlocalname() not in data:
                return
            if xstype.maxoccurs != 1:
                # repeated element: one child per list item
                eltype = xstype.gettype()
                for item in data[xstype.getlocalname()]:
                    elem = etree.SubElement(root, xstype.getname())
                    cls.rpc_serialize(eltype, item, elem, xsd)
            else:
                eltype = xstype.gettype()
                elem = etree.SubElement(root, xstype.getname())
                cls.rpc_serialize(eltype, data[xstype.getlocalname()],
                                  elem, xsd)
            return
        if isinstance(xstype, XSAny):
            # TODO: maybe if item is a dict, then use item name to serialise
            # elements or use soapenc:type elements
            if xstype.getlocalname() not in data:
                #ok here is the point to check for dicts
                if isinstance(data, dict):
                    #then serialize all element of dict
                    for key, val in data.items():
                        elem = etree.SubElement(root, key)
                        if val is None:
                            elem.set(clark(NS_XMLSCHEMA_INSTANCE, "nil"),
                                     "true")
                        else:
                            nsurl, name = cls.getxsdtype(val)
                            qtype = cls.getqname(nsurl, name, root)
                            elem.set(clark(NS_XMLSCHEMA_INSTANCE, 'type'),
                                     qtype)
                            xsitype = xsd.getType(clark(nsurl, name))
                            cls.rpc_serialize(xsitype, val, elem, xsd)
                            #elem.text = str(item)
                return
            if xstype.maxoccurs != 1:
                for item in data[xstype.getlocalname()]:
                    # try the item as literal xml first; on failure
                    # serialise it as a typed value (py2 bare except)
                    try:
                        elem = etree.XML(item)
                    except:
                        elem = etree.SubElement(root, xstype.getname())
                        if item is None:
                            elem.set(clark(NS_XMLSCHEMA_INSTANCE, "nil"),
                                     "true")
                        else:
                            nsurl, name = cls.getxsdtype(item)
                            qtype = cls.getqname(nsurl, name, root)
                            elem.set(clark(NS_XMLSCHEMA_INSTANCE, 'type'),
                                     qtype)
                            xsitype = xsd.getType(clark(nsurl, name))
                            cls.rpc_serialize(xsitype, item, elem, xsd)
                            #elem.text = str(item)
                    root.append(elem)
            else:
                try:
                    elem = etree.XML(data[xstype.getlocalname()])
                except:
                    elem = etree.SubElement(root, xstype.getname())
                    if data is None:
                        elem.set(clark(NS_XMLSCHEMA_INSTANCE, "nil"), "true")
                    else:
                        nsurl, name = cls.getxsdtype(data[xstype.getlocalname()])
                        qtype = cls.getqname(nsurl, name, root)
                        elem.set(clark(NS_XMLSCHEMA_INSTANCE, 'type'), qtype)
                        xsitype = xsd.getType(clark(nsurl, name))
                        cls.rpc_serialize(xsitype, data[xstype.getlocalname()],
                                          elem, xsd)
                        #elem.text = str(data[xstype.getLocalName()])
            return
        # complex type: tag with xsi:type, recurse into the content model,
        # then serialise declared attributes
        nsurl, name = xstype.gettypename()
        qtype = cls.getqname(nsurl, name, root)
        root.set(clark(NS_XMLSCHEMA_INSTANCE, 'type'), qtype)
        nexttype = xstype.getelement()
        if nexttype is None:
            return
        if isinstance(nexttype, list):
            for rectype in nexttype:
                cls.rpc_serialize(rectype, data, root, xsd)
        else:
            cls.rpc_serialize(nexttype, data, root, xsd)
        attributes = xstype.getattributes()
        if attributes:
            for attrib in attributes:
                # attribute values may live in a special '__attrs' sub-dict
                if '__attrs' in data:
                    attrdata = data['__attrs']
                else:
                    attrdata = data
                attrname = attrib.getname()
                attrval = attrib.encode(attrdata)
                if attrname is not None and attrval is not None:
                    root.set(attrname, attrval)

    @classmethod
    def deserialattributes(cls, xstype, elem):
        '''
        helper methods to deserialse xml attributes.

        Returns a dict of attribute name/value pairs, or None when the
        type declares no attributes.
        '''
        ret = {}
        attributes = xstype.getattributes()
        if attributes:
            for attrib in attributes:
                attrname = attrib.getname()
                attrvalue = None
                if attrname in elem.attrib:
                    attrvalue = attrib.decode(elem.attrib[attrname])
                ret[attrname] = attrvalue
        if ret:
            return ret
        return None

    @classmethod
    def rpc_deserialize(cls, xstype, root, xsd):
        '''
        the actual rpc-encoding deserialise method.
        '''
        # TODO: check for tag name in root... maybe its one of soap-enc:type ...
        # then i would know too, which type it is (in AnyType if)
        if root is None:
            return None
        if 'href' in root.attrib:
            # soap-enc multi-ref: follow the href to the element with the
            # matching id and deserialise that element instead
            elem = root.getroottree().xpath('//*[@id="%s"]' %
                                            (root.get("href")[1:]))[0]
            return cls.rpc_deserialize(xstype, elem, xsd)
        if isinstance(xstype, (AnySimpleType, XSSimpleType)):
            return xstype.decode(root.text)
        if isinstance(xstype, AnyType):
            # TODO: Do I really have to care about anytype here? or just
            # deserialze XSAny?
            xsitype = root.get(clark(NS_XMLSCHEMA_INSTANCE, 'type'))
            if xsitype is not None:
                xsitype = xsd.getType(qname2clark(xsitype, root.nsmap))
                ret = cls.rpc_deserialize(xsitype, root, xsd)
            else:
                # no type information available: return the raw xml string
                ret = etree.tostring(root)
            return ret
        if isinstance(xstype, XSElement): #ComplexType):
            ret = None
            childname = xstype.getname()
            if xstype.maxoccurs != 1:
                ret = List()
                for childelem in root.findall(childname):
                    # prefer the xsi:type attribute over the declared type
                    xsitype = childelem.get(clark(NS_XMLSCHEMA_INSTANCE,
                                                  'type'))
                    if xsitype is not None:
                        xsitype = xsd.getType(qname2clark(xsitype,
                                                          childelem.nsmap))
                    else:
                        xsitype = xstype.gettype()
                    val = cls.rpc_deserialize(xsitype, childelem, xsd)
                    attr = cls.deserialattributes(xsitype, childelem)
                    if attr:
                        for key, value in attr.items():
                            setattr(val, key, value)
                    ret.append(val)
            else:
                childelem = root.find(childname)
                if childelem is not None:
                    xsitype = childelem.get(clark(NS_XMLSCHEMA_INSTANCE,
                                                  'type'))
                    if xsitype is not None:
                        xsitype = xsd.getType(qname2clark(xsitype,
                                                          childelem.nsmap))
                    else:
                        xsitype = xstype.gettype()
                    val = cls.rpc_deserialize(xsitype, childelem, xsd)
                    attr = cls.deserialattributes(xsitype, childelem)
                    if attr:
                        for key, value in attr.items():
                            setattr(val, key, value)
                    ret = val
            return ret
        if isinstance(xstype, XSAny):
            # TODO: check for multiplicity unbound
            if xstype.maxoccurs != 1:
                ret = List()
                for childelem in root:
                    xsitype = childelem.get(clark(NS_XMLSCHEMA_INSTANCE,
                                                  'type'))
                    if xsitype is not None:
                        xsitype = xsd.getType(qname2clark(xsitype,
                                                          childelem.nsmap))
                    else:
                        # check for elem name
                        try:
                            xsitype = xsd.getType(childelem.tag)
                        except KeyError:
                            xsitype = xstype.gettype()
                    ret.append(cls.rpc_deserialize(xsitype, childelem, xsd))
            else:
                childelem = root[0]
                xsitype = childelem.get(clark(NS_XMLSCHEMA_INSTANCE, 'type'))
                if xsitype is not None:
                    xsitype = xsd.getType(qname2clark(xsitype, childelem.nsmap))
                else:
                    # check for elem name
                    try:
                        xsitype = xsd.getType(childelem.tag)
                    except KeyError:
                        xsitype = xstype.gettype()
                ret = cls.rpc_deserialize(xsitype, childelem, xsd)
            return ret
        # complex type: recurse into the content model
        nexttype = xstype.getelement()
        if nexttype is None:
            return
        if isinstance(nexttype, list):
            ret = Dict()
            for rectype in nexttype: # TODO: possibly create map here
                ret[rectype.getlocalname()] = cls.rpc_deserialize(rectype,
                                                                  root, xsd)
            if len(nexttype) == 1:
                # single entry: unwrap the value (Python 2 .values() indexing)
                ret = ret.values()[0]
            return ret
        else:
            return cls.rpc_deserialize(nexttype, root, xsd)
from lxml import etree
from zope.interface import classProvides
from rsl.misc.namespace import clark
from rsl.xsd.interfaces import IXMLSerializer, IXMLDeserializer
from rsl.xsd.deserialtypes import List, Dict
from rsl.xsd.urtype import AnySimpleType
from rsl.xsd.component import XSElement, XSAny, XSSimpleType
from rsl.xsd.namespace import NS_XMLSCHEMA_INSTANCE
class XMLElementSerializer(object):
    '''
    de/serializer methods to start with a xsd-element definition as root.
    '''

    classProvides(IXMLSerializer, IXMLDeserializer)

    @classmethod
    def serialize(cls, params, typename, schema, root):
        '''
        params ... data structure to serialize
        typename ... element name in typesystem
        schema ... ISchema/Manager instance
        root ... append serialized data to root (etree); if None, a new
                 standalone element is created
        return newly created element
        '''
        eltype = schema.getElement(typename)
        if root is None:
            root = etree.Element(eltype.getname())
        else:
            root = etree.SubElement(root, eltype.getname())
        xstype = eltype.gettype()
        # delegate the payload serialisation to the type serialiser
        return XMLSchemaSerializer.serialize(params, xstype, schema, root)

    @classmethod
    def deserialize(cls, root, typename, schema):
        '''
        root ... etree element to deserialise
        typename ... the element name to start deserialisation with.
        schema ... ISchema/Manager instance
        return python data structure representing root
        '''
        xstype = schema.getElement(typename).gettype()
        return XMLSchemaSerializer.deserialize(root, xstype, schema)
class XMLTypeSerializer(object):
    '''
    de/serializer methods to start with a xsd-type definition as root.
    '''

    classProvides(IXMLSerializer, IXMLDeserializer)

    @classmethod
    def serialize(cls, params, typename, schema, root):
        '''
        params ... data structure to serialize
        typename ... type name in typesystem
        schema ... ISchema/Manager instance
        root ... append serialized data to root (etree)
        return newly created element
        '''
        xstype = schema.getType(typename)
        return XMLSchemaSerializer.serialize(params, xstype, schema, root)

    @classmethod
    def deserialize(cls, root, typename, xsd):
        '''
        root ... etree element to deserialise
        typename ... the type name to start deserialisation with.
        xsd ... ISchema/Manager instance
        return python data structure representing root
        '''
        xstype = xsd.getType(typename)
        return XMLSchemaSerializer.deserialize(root, xstype, xsd)
class XMLSchemaSerializer(object):
    '''
    de/serializer methods to start with a xsd-type instance as root.

    NOTE: Python 2 code (``ret.values()[0]`` in _deserialize).
    '''
    #classImplements(IXMLSerializer, IXMLDeserializer)

    @classmethod
    def serialize(cls, params, xstype, types, root):
        '''
        this function is the entry point for the serialisation mechanism
        and just provides the IXMLSerializer interface.
        TODO: clarify parameter types. maybe this method is not conformant
        to the interface
        '''
        return cls._serialize(xstype, params, root)

    @classmethod
    def deserialize(cls, root, xstype, xsd):
        '''
        this function is the entry point for the deserialisation mechanism
        and just provides the IXMLDeserializer interface.
        TODO: clarify parameter types. maybe this method is not conformant
        to the interface
        '''
        return cls._deserialize(xstype, root)

    @classmethod
    def _serialize(cls, xstype, data, root):
        '''
        the actual serialisation method.
        xstype: an xsd-type or element instance.
        data: the python data structure to serialise
        root: the element to add the serialised data (etree)
        returns: nothing
        '''
        if isinstance(xstype, (AnySimpleType, XSSimpleType)):
            if data is None:
                # explicit null value
                root.set(clark(NS_XMLSCHEMA_INSTANCE, 'nil'), 'true')
            else:
                root.text = xstype.encode(data)
            return
        if isinstance(xstype, XSElement): #ComplexType):
            if xstype.getlocalname() not in data:
                return
            if xstype.maxoccurs != 1:
                # repeated element: one child per list item
                eltype = xstype.gettype()
                for item in data[xstype.getlocalname()]:
                    elem = etree.SubElement(root, xstype.getname())
                    cls._serialize(eltype, item, elem)
            else:
                eltype = xstype.gettype()
                elem = etree.SubElement(root, xstype.getname())
                cls._serialize(eltype, data[xstype.getlocalname()], elem)
            return
        if isinstance(xstype, XSAny):
            # NOTE(review): `maxOccurs` here vs `maxoccurs` everywhere else
            # in this module -- possibly a typo; confirm the attribute name
            # defined on XSAny.
            if xstype.maxOccurs != 1:
                #TODO: maybe if item is a dict, then use item name to serialize
                for item in data:
                    # try the item as literal xml; fall back to an <any>
                    # wrapper element (py2 bare except)
                    try:
                        elem = etree.XML(item)
                    except:
                        elem = etree.SubElement(root, 'any')
                        if item is None:
                            elem.set(clark(NS_XMLSCHEMA_INSTANCE, "nil"),
                                     "true")
                        else:
                            elem.text = str(item)
                    root.append(elem)
            else:
                try:
                    elem = etree.XML(data)
                except:
                    elem = etree.SubElement(root, 'any')
                    if data is None:
                        elem.set(clark(NS_XMLSCHEMA_INSTANCE, "nil"), "true")
                    else:
                        elem.text = str(data)
            return
        # complex type: recurse into the content model, then attributes
        nexttype = xstype.getelement()
        if nexttype is not None:
            if isinstance(nexttype, list):
                for rectype in nexttype:
                    cls._serialize(rectype, data, root)
            else:
                cls._serialize(nexttype, data, root)
        attributes = xstype.getattributes()
        if attributes:
            for attrib in attributes:
                # TODO: let attribut only serialise a single value and do name
                # checking before...
                # How to handle fixed? default?
                if '__attrs' in data:
                    attrdata = data['__attrs']
                else:
                    attrdata = data
                attrname = attrib.getname()
                attrval = attrib.encode(attrdata)
                if attrname is not None and attrval is not None:
                    root.set(attrname, attrval)

    @classmethod
    def deserialattributes(cls, xstype, elem):
        '''
        a helper function to deserialise the attributes of an etree-element.
        returns a dictionary of attrname-value pairs (or None when the
        type declares no attributes).
        '''
        ret = {}
        attributes = xstype.getattributes()
        if attributes:
            for attrib in attributes:
                attrname = attrib.getname()
                attrvalue = None
                if attrname in elem.attrib:
                    attrvalue = attrib.decode(elem.attrib[attrname])
                ret[attrname] = attrvalue
        if ret:
            return ret
        return None

    @classmethod
    def _deserialize(cls, xstype, root):
        '''
        the actual deserialisation method.
        xstype: an xsd-type or element instance.
        root: the element to start deserialisation from (etree)
        returns: python data structure
        '''
        if root is None:
            return None
        if isinstance(xstype, (AnySimpleType, XSSimpleType)):
            return xstype.decode(root.text)
        if isinstance(xstype, XSElement):
            ret = None
            childname = xstype.getname()
            if xstype.maxoccurs != 1:
                ret = List()
                for childelem in root.findall(childname):
                    # childelem has attributes, append dict.
                    val = cls._deserialize(xstype.gettype(), childelem)
                    attr = cls.deserialattributes(xstype, childelem)
                    if attr:
                        if val is None: # TODO: ugly hack for empty elements
                                        # with attributes
                            val = attr
                        else:
                            for key, attrval in attr.items():
                                setattr(val, key, attrval)
                    ret.append(val)
            else:
                childelem = root.find(childname)
                if childelem is not None:
                    # childelem has attributes, append dict
                    val = cls._deserialize(xstype.gettype(), childelem)
                    attr = cls.deserialattributes(xstype, childelem)
                    if attr:
                        if val is None: # TODO: ugly hack for empty elements
                                        # with attributes
                            val = attr
                        else:
                            for key, attrval in attr.items():
                                setattr(val, key, attrval)
                    ret = val
            return ret
        if isinstance(xstype, XSAny):
            # TODO: check for multiplicity unbound
            # TODO: try to find type for element name?
            ret = None
            if xstype.maxoccurs != 1:
                ret = List()
                for childelem in root:
                    ret.append(cls._deserialize(xstype.gettype(), childelem))
            else:
                childelem = root[0]
                ret = cls._deserialize(xstype.gettype(), childelem)
            return ret
        # complex type: recurse into the content model
        nexttype = xstype.getelement()
        if nexttype is None:
            return None
        if isinstance(nexttype, list):
            # content models may contain nested lists -- flatten first
            nexttype = cls.flatten(nexttype)
            ret = Dict()
            for rectype in nexttype: # TODO: possibly create map here
                ret[rectype.getlocalname()] = cls._deserialize(rectype, root)
            if len(nexttype) == 1:
                # single entry: unwrap (Python 2 .values() indexing)
                ret = ret.values()[0]
            return ret
        else:
            return cls._deserialize(nexttype, root)

    @classmethod
    def flatten(cls, seq):
        '''
        flattens a list of lists/tuples to a single list.
        '''
        res = []
        for item in seq:
            if (isinstance(item, (tuple, list))):
                res.extend(cls.flatten(item))
            else:
                res.append(item)
        return res
import textwrap
from typing import Any, Optional, Union
class EscapedString:
    """A ``str`` wrapper that understands backslash-escaped characters.

    ``chars`` lists the characters that may occur escaped (as ``\\x``) in
    the source string.  The search helpers (``find``/``index``/``split``)
    can then skip escaped occurrences of those characters.
    """

    def __init__(self, src: str = "", chars: Optional[str] = None):
        # Characters that may appear escaped in the source.
        self.escape_chars = set() if chars is None else set(chars)
        self._src = str(src)

    def _clone(self, src: str) -> "EscapedString":
        """Build a new instance carrying over the escape-char set.

        BUGFIX: slicing/stripping/replacing previously dropped
        ``escape_chars`` from the derived instance.
        """
        return self.__class__(src, "".join(self.escape_chars))

    def __contains__(self, sub: str) -> bool:
        return sub in self._src

    def __len__(self) -> int:
        return len(self._src)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.escape_chars}, {textwrap.shorten(self._src, 60)})"

    def __str__(self) -> str:
        return self._src

    def escape(self) -> str:
        """Return the source with escape backslashes removed, i.e. every
        ``\\x`` becomes ``x`` (this *un*-escapes, despite the name)."""
        ret = self._src
        for char in self.escape_chars:
            ret = ret.replace(f"\\{char}", char)
        return ret

    def __getitem__(self, _slice: Union[int, slice]) -> "EscapedString":
        return self._clone(self._src[_slice])

    def __add__(self, other: str) -> str:
        return self._src + other

    def __radd__(self, other: str) -> str:
        return other + self._src

    def __eq__(self, other: Any) -> bool:
        # Compares the underlying string, so comparison with plain str works.
        return self._src == other

    def __hash__(self) -> int:
        return hash(self._src)

    def format(self, *args: Any, **kwargs: Any) -> str:
        return self._src.format(*args, **kwargs)

    def strip(self, chars: Optional[str] = None, /) -> "EscapedString":
        return self._clone(self._src.strip(chars))

    def lstrip(self, chars: Optional[str] = None, /) -> "EscapedString":
        return self._clone(self._src.lstrip(chars))

    def rstrip(self, chars: Optional[str] = None, /) -> "EscapedString":
        return self._clone(self._src.rstrip(chars))

    def find(
        self,
        sub: str,
        start: int = 0,
        end: Optional[int] = None,
        skip_escaped: bool = True,
    ) -> int:
        """Like ``str.find`` but, when *sub* is an escape char and
        *skip_escaped* is true, escaped occurrences are skipped."""
        if end is None:
            end = len(self._src)
        if not skip_escaped or sub not in self.escape_chars:
            return self._src.find(sub, start, end)
        index = self._src.find(sub, start, end)
        # An occurrence at position 0 cannot be escaped (nothing precedes
        # it), and a negative index means "not found" -- both end the scan.
        while index > 0 and self._src[index - 1] == "\\":
            index = self._src.find(sub, index + 1, end)
        return index

    def rfind(
        self,
        sub: str,
        start: int = 0,
        end: Optional[int] = None,
        skip_escaped: bool = True,
    ) -> int:
        if end is None:
            end = len(self._src)
        if not skip_escaped or sub not in self.escape_chars:
            return self._src.rfind(sub, start, end)
        # Escape-aware reverse search is still unimplemented.
        raise Exception("Leo should have implemented this but didn't")

    def index(
        self,
        sub: str,
        start: int = 0,
        end: Optional[int] = None,
        skip_escaped: bool = True,
    ) -> int:
        """Like ``str.index``; raises ValueError when no (unescaped)
        occurrence exists."""
        if end is None:
            end = len(self._src)
        if not skip_escaped or sub not in self.escape_chars:
            return self._src.index(sub, start, end)
        # BUGFIX: delegate to find(); the old loop used str.find internally
        # and, on a miss, treated the -1 result as a real position
        # (inspecting self._src[-2]) instead of raising.
        pos = self.find(sub, start, end, skip_escaped=True)
        if pos < 0:
            raise ValueError("substring not found")
        return pos

    def rindex(
        self,
        sub: str,
        start: int = 0,
        end: Optional[int] = None,
        skip_escaped: bool = True,
    ) -> int:
        if end is None:
            end = len(self._src)
        if not skip_escaped or sub not in self.escape_chars:
            return self._src.rindex(sub, start, end)
        # Escape-aware reverse search is still unimplemented.
        raise Exception("Leo should have implemented this but didn't")

    def replace(
        self, old: str, new: str, count: int = -1, /, skip_escaped: bool = True
    ) -> "EscapedString":
        if not skip_escaped or old not in self.escape_chars:
            return self._clone(self._src.replace(old, new, count))
        # Escape-aware replace is still unimplemented.
        raise Exception("Leo should have implemented this but didn't")

    def startswith(
        self,
        prefix: str,
        start: int = 0,
        end: Optional[int] = None,
        skip_escaped: bool = True,
    ) -> bool:
        # BUGFIX: was `end == -1`, inconsistent with every sibling method.
        if end is None:
            end = len(self._src)
        if len(prefix) == 1:
            # NOTE(review): single-char prefixes never consider escaping,
            # even when the prefix is an escape char -- confirm intended.
            return self._src.startswith(prefix, start, end)
        if not skip_escaped or prefix not in self.escape_chars:
            return self._src.startswith(prefix, start, end)
        raise Exception("Leo should have implemented this but didn't")

    def endswith(
        self,
        prefix: str,
        start: int = 0,
        end: Optional[int] = None,
        skip_escaped: bool = True,
    ) -> bool:
        if end is None:
            end = len(self._src)
        if not skip_escaped or prefix not in self.escape_chars:
            return self._src.endswith(prefix, start, end)
        raise Exception("Leo should have implemented this but didn't")

    def split(
        self,
        /,
        sep: Optional[str] = None,
        maxsplit: int = -1,
        skip_escaped: bool = True,
    ) -> list[str]:
        """Like ``str.split`` but, when *sep* is an escape char and
        *skip_escaped* is true, escaped separators do not split.

        BUGFIX: the previous implementation dropped the first and last
        character of every segment, never split at position 0, and
        ignored *maxsplit* on the escape-aware path.
        """
        if not skip_escaped or sep not in self.escape_chars:
            return self._src.split(sep, maxsplit)
        parts: list[str] = []
        prev = 0
        for curr, char in enumerate(self._src):
            if char != sep:
                continue
            if curr > 0 and self._src[curr - 1] == "\\":
                continue  # escaped separator: keep it inside the segment
            if 0 <= maxsplit <= len(parts):
                break
            parts.append(self._src[prev:curr])
            prev = curr + 1
        parts.append(self._src[prev:])
        return parts
import logging
from collections import defaultdict
from itertools import count
from string import ascii_uppercase
from typing import Generator, Optional, Type
from icecream import ic
from . import nodes
logger = logging.getLogger("RSM").getChild("tform")
class RSMTransformerError(Exception):
    """Raised when a transformation pass cannot be applied to the tree."""
    pass
class Transformer:
"""Apply transformations to the abstract syntax tree."""
def __init__(self) -> None:
    # The manuscript tree being transformed; set by transform().
    self.tree: Optional[nodes.Manuscript] = None
    # Maps each label string to the unique node carrying it.
    self.labels_to_nodes: dict[str, nodes.Node] = {}
def transform(self, tree: nodes.Manuscript) -> nodes.Manuscript:
    """Run all transformation passes over *tree* (in place) and return it.

    Pass order matters: references can only be resolved after labels have
    been collected.
    """
    logger.info("Transforming...")
    self.tree = tree
    self.collect_labels()
    self.resolve_pending_references()
    self.add_necessary_subproofs()
    self.autonumber_nodes()
    self.make_toc()
    self.add_keywords_to_constructs()
    return tree
def collect_labels(self) -> None:
    """Register every labeled node in ``self.labels_to_nodes``.

    On a duplicate label, keep the first node encountered, warn, and
    clear the label of the later node.
    """
    for labeled in self.tree.traverse(condition=lambda n: n.label):
        if labeled.label not in self.labels_to_nodes:
            self.labels_to_nodes[labeled.label] = labeled
        else:
            logger.warning(f"Duplicate label {labeled.label}, using first encountered")
            labeled.label = ""
def _label_to_node(self, label: str, default=nodes.Error) -> nodes.Node:
    """Resolve *label* to its node.

    On an unknown label, warn and return an instance of *default* (a node
    class) constructed with a bracketed error message.
    """
    try:
        return self.labels_to_nodes[label]
    except KeyError as e:
        logger.warning(f'Reference to nonexistent label "{label}"')
        return default(f'[unknown label "{label}"]')
def resolve_pending_references(self) -> None:
    """Replace every Pending* placeholder node with its resolved form.

    PendingReference -> Reference (or an Error node for unknown labels);
    PendingCite -> Cite with backlinks recorded on each target;
    PendingPrev -> Reference to the n-th previous sibling Step.

    Raises RSMTransformerError for a :prev: outside a proof step, a
    :prev: with too few previous steps, or any leftover placeholder.
    """
    classes = [
        nodes.PendingReference,
        nodes.PendingCite,
        nodes.PendingPrev,
    ]
    # Hands out unique labels for the generated Cite nodes.
    counter = count()
    for pending in self.tree.traverse(condition=lambda n: type(n) in classes):
        if isinstance(pending, nodes.PendingReference):
            target = self._label_to_node(pending.target)
            if isinstance(target, nodes.Error):
                # Unknown label: show the error node in place of a reference.
                pending.replace_self(target)
            else:
                pending.replace_self(
                    nodes.Reference(
                        target=target,
                        overwrite_reftext=pending.overwrite_reftext,
                    )
                )
        elif isinstance(pending, nodes.PendingCite):
            # Unknown bibliography labels become UnknownBibitem nodes.
            targets = [
                self._label_to_node(label, nodes.UnknownBibitem)
                for label in pending.targetlabels
            ]
            cite = nodes.Cite(targets=targets)
            cite.label = f"cite-{next(counter)}"
            pending.replace_self(cite)
            # Record a backlink from each cited item to this cite.
            for tgt in targets:
                tgt.backlinks.append(cite.label)
        elif isinstance(pending, nodes.PendingPrev):
            try:
                step = pending.first_ancestor_of_type(nodes.Step)
            except AttributeError:
                step = None
            if step is None:
                raise RSMTransformerError("Found :prev: tag outside proof step")
            # Walk back pending.target sibling Steps from the current one.
            target = step
            for _ in range(int(str(pending.target))):
                target = target.prev_sibling(nodes.Step)
                if target is None:
                    raise RSMTransformerError(
                        f"Did not find previous {pending.target} step(s)"
                    )
            pending.replace_self(
                nodes.Reference(
                    target=target, overwrite_reftext=pending.overwrite_reftext
                )
            )
    # Sanity check: every placeholder must have been replaced above.
    for pending in self.tree.traverse(condition=lambda n: type(n) in classes):
        raise RSMTransformerError("Found unresolved pending reference")
def add_necessary_subproofs(self) -> None:
    """Normalize each Step into a Statement plus, when it contains
    sub-steps, a Subproof wrapping them."""
    for step in self.tree.traverse(nodeclass=nodes.Step):
        if not step.children:
            continue
        # Index of the first Step/Subproof child: everything before it
        # belongs to the step's own statement.
        _, split_at_idx = step.first_of_type(
            (nodes.Step, nodes.Subproof), return_idx=True
        )
        if split_at_idx is None:
            split_at_idx = len(step.children)
        children = step.children[::]
        step.clear()
        statement = nodes.Statement()
        statement.append(children[:split_at_idx])
        if split_at_idx == len(children):
            # No sub-steps: the whole content is the statement.
            step.append(statement)
            continue
        if isinstance(children[split_at_idx], nodes.Step):
            # Wrap the trailing Steps in a new Subproof.
            subproof = nodes.Subproof()
            subproof.append(children[split_at_idx:])
        elif isinstance(children[split_at_idx], nodes.Subproof):
            # Already wrapped; it must be the last child.
            assert split_at_idx == len(children) - 1
            subproof = children[split_at_idx]
        else:
            raise RSMTransformerError("How did we get here?")
        step.append([statement, subproof])
    def autonumber_nodes(self) -> None:
        """Assign numbers to nodes that request automatic numbering.

        Counters are keyed first by the class the node is numbered *within*
        (that counter table is reset whenever a new instance of an
        autonumbered class starts) and then by the class the node is numbered
        *as*.  Steps are numbered separately per (sub)proof.  Once an
        Appendix node is seen, manuscript-level numbering switches to capital
        letters (A, B, ...) and Section reftexts say "Appendix" instead.
        """
        counts: dict[Type[nodes.Node], dict[Type[nodes.Node], Generator]] = defaultdict(
            lambda: defaultdict(lambda: count(start=1))
        )
        within_appendix = False
        for node in self.tree.traverse():
            if isinstance(node, nodes.Appendix):
                # From here on, top-level numbers are letters, not integers.
                counts[nodes.Manuscript] = defaultdict(lambda: iter(ascii_uppercase))
                within_appendix = True
                continue
            if isinstance(node, (nodes.Proof, nodes.Subproof)):
                # Steps restart at 1 inside every proof and subproof.
                self._autonumber_steps(node)
                continue
            if isinstance(node, nodes.Step):
                continue
            if node.autonumber and not node.nonum:
                # Reset the counters that live *within* this node, then draw
                # this node's own number from its enclosing scope's counter.
                counts[type(node)] = defaultdict(lambda: count(start=1))
                num = next(counts[node.number_within][node.number_as])
                node.number = num
            if within_appendix and isinstance(node, nodes.Section):
                node.reftext_template = node.reftext_template.replace(
                    "{nodeclass}", "Appendix"
                )
def _autonumber_steps(self, proof: nodes.Proof) -> None:
step_gen = (s for s in proof.children if isinstance(s, nodes.Step))
for idx, step in enumerate(step_gen, start=1):
step.number = idx
def make_toc(self) -> None:
toc = None
for node in self.tree.traverse(nodeclass=nodes.Contents):
if toc is None:
toc = node
else:
logger.warning("Multiple Tables of Content found, using only first one")
node.remove_self()
if toc is None:
return
current_parent = toc
for sec in self.tree.traverse(nodeclass=nodes.Section):
item = nodes.Item()
# Sections with no number are still displayed in the TOC, while sub- or
# subsubsections are simply ignored
if sec.nonum and isinstance(node, nodes.Subsection):
continue
reftext = f"{sec.title}" if sec.nonum else f"{sec.full_number}. {sec.title}"
item.append(nodes.Reference(target=sec, overwrite_reftext=reftext))
if type(sec) is nodes.Section:
toc.append(item)
if sec.first_of_type(nodes.Subsection):
itemize = nodes.Itemize()
item.append(itemize)
current_parent = itemize
elif type(sec) is nodes.Subsection:
current_parent.append(item)
if sec.first_of_type(nodes.Subsubsection):
itemize = nodes.Itemize()
item.append(itemize)
current_parent = itemize
else:
current_parent.append(item)
def add_keywords_to_constructs(self) -> None:
for construct in self.tree.traverse(nodeclass=nodes.Construct):
keyword = nodes.Keyword()
keyword.append(nodes.Text(f"{construct.keyword} "))
construct.prepend(keyword)
kind = construct.kind
assert kind
construct.types.append(kind)
if kind not in {"then", "suffices", "claim", "claimblock", "qed"}:
construct.types.append("assumption") | /rsm_markup-0.2.4-cp310-cp310-macosx_12_0_x86_64.whl/rsm/transformer.py | 0.77193 | 0.277644 | transformer.py | pypi |
import logging
from pathlib import Path
from typing import Any, Callable, NamedTuple, Optional, Union
from icecream import ic
from rsm import (
builder,
linter,
reader,
rsmlogger,
transformer,
translator,
tsparser,
writer,
)
from .rsmlogger import GatherHandler
logger = logging.getLogger("RSM")
class RSMApplicationError(Exception):
    """Raised when an RSM application is misconfigured (e.g. bad source arguments)."""

    pass
class Task(NamedTuple):
    """A step in a :class:`Pipeline`."""

    name: str  # attribute name under which `obj` is exposed on the Pipeline
    obj: Any  # object that carries out the work (e.g. a Reader instance)
    run: Callable  # callable invoked by Pipeline.run with the previous result


class Pipeline:
    """A sequence of :class:`Task`s executed one after another."""

    def __init__(self, tasks: list[Task]):
        self.tasks: list[Task] = []
        for task in tasks:
            self.add_task(task)

    def add_task(self, task: Task) -> None:
        """Add a task at the end of the current pipeline."""
        self.tasks.append(task)
        setattr(self, task.name, task.obj)

    def pop_task(self) -> Task:
        """Remove and return the last task."""
        last = self.tasks.pop()
        delattr(self, last.name)
        return last

    def run(self, initial_args: Any = None) -> Any:
        """Execute every task serially, feeding each result into the next call.

        A dict result is splatted as keyword arguments, a list/tuple as
        positional arguments, ``None`` as no arguments, and any other value is
        passed as a single positional argument.
        """
        result = initial_args
        for task in self.tasks:
            call = task.run
            if isinstance(result, dict):
                result = call(**result)
            elif isinstance(result, (list, tuple)):
                result = call(*result)
            else:
                result = call() if result is None else call(result)
        return result
class RSMApp(Pipeline):
    """Base application: a Pipeline that configures RSM logging before running."""

    # Default logging verbosity for all RSM applications.
    default_log_level = logging.WARNING

    def __init__(
        self,
        tasks: Optional[list[Task]] = None,
        loglevel: int = default_log_level,
        log_format: str = "rsm",
        log_time: bool = True,
    ):
        # Configure the global "RSM" logger before any task executes.
        rsmlogger.config_rsm_logger(loglevel, log_format, log_time)
        logger.info("Application started")
        logger.info("Configuring...")
        # self.config = self.config.configure()
        super().__init__(tasks or [])

    def run(self, initial_args: Any = None) -> Optional[str]:
        """Run the pipeline and return its final result (typically a string)."""
        result = super().run(initial_args)
        logger.info("Done.")
        return result

    def _setup_handler(self) -> None:
        # NOTE(review): never called anywhere in this file -- looks like an
        # unfinished hook for gathering warning records; confirm before use.
        target = logging.StreamHandler()
        target.setLevel(logging.WARNING)
        handler = GatherHandler([logging.WARNING], target)
        logger.addHandler(handler)

    @property
    def logs(self) -> list:
        # NOTE(review): always returns an empty list; presumably meant to
        # expose the records gathered by _setup_handler -- confirm intent.
        return []
class ParserApp(RSMApp):
    """Application that reads, parses, and transforms a manuscript.

    Exactly one of *srcpath* (a file to read) or *plain* (the manuscript
    source as a string) must be supplied.
    """

    def __init__(
        self,
        srcpath: Optional[Path] = None,
        plain: str = "",
        loglevel: int = RSMApp.default_log_level,
        log_format: str = "rsm",
        log_time: bool = True,
    ):
        self._validate_srcpath_and_plain(srcpath, plain)
        if plain:
            # Source already in memory; the first task simply hands it over.
            tasks = [Task("dummy", None, lambda: plain)]
        else:
            rdr = reader.Reader()
            tasks = [Task("reader", rdr, lambda: rdr.read(srcpath))]
        parser = tsparser.TSParser()
        tform = transformer.Transformer()
        tasks.append(Task("parser", parser, parser.parse))
        tasks.append(Task("transformer", tform, tform.transform))
        super().__init__(tasks, loglevel, log_format, log_time)

    @staticmethod
    def _validate_srcpath_and_plain(
        srcpath: Union[Path, str, None], plain: str
    ) -> None:
        """Raise unless exactly one of the two source arguments was given."""
        if (not srcpath and not plain) or (srcpath and plain):
            raise RSMApplicationError("Must specify exactly one of srcpath, plain")
class LinterApp(ParserApp):
    """ParserApp that additionally runs the linter over the parsed tree.

    The logging level is pinned to ``linter.Linter.LINT_LVL`` so lint
    messages are always emitted; the *loglevel* argument is accepted for
    interface parity with the other apps but is not used here.
    """

    def __init__(
        self,
        srcpath: Optional[Path] = None,
        plain: str = "",
        loglevel: int = RSMApp.default_log_level,
        log_format: str = "rsm",
        log_time: bool = True,
    ):
        super().__init__(srcpath, plain, linter.Linter.LINT_LVL, log_format, log_time)
        lint_obj = linter.Linter()
        self.add_task(Task("linter", lint_obj, lint_obj.lint))
class ProcessorApp(ParserApp):
    """ParserApp that also translates the transformed tree into output.

    Additional parameters: *handrails* selects the HandrailsTranslator over
    the plain Translator; *run_linter* inserts a lint task before translation.
    """

    def __init__(
        self,
        srcpath: Optional[Path] = None,
        plain: str = "",
        loglevel: int = RSMApp.default_log_level,
        log_format: str = "rsm",
        log_time: bool = True,
        handrails: bool = False,
        run_linter: bool = False,
    ):
        super().__init__(srcpath, plain, loglevel, log_format, log_time)
        if run_linter:
            lint_obj = linter.Linter()
            self.add_task(Task("linter", lint_obj, lint_obj.lint))
        if handrails:
            tr = translator.HandrailsTranslator()
        else:
            tr = translator.Translator()
        self.add_task(Task("translator", tr, tr.translate))
class FullBuildApp(ProcessorApp):
    """ProcessorApp that additionally builds and writes the final output."""

    def __init__(
        self,
        srcpath: Optional[Path] = None,
        plain: str = "",
        loglevel: int = RSMApp.default_log_level,
        log_format: str = "rsm",
        log_time: bool = True,
        handrails: bool = True,
        run_linter: bool = False,
    ):
        super().__init__(
            srcpath, plain, loglevel, log_format, log_time, handrails, run_linter
        )
        bld = builder.FullBuilder()
        self.add_task(Task("builder", bld, bld.build))
        wrt = writer.Writer()
        self.add_task(Task("writer", wrt, wrt.write))
def render(
    source: str = "",
    path: str = "",
    handrails: bool = False,
    loglevel: int = RSMApp.default_log_level,
    log_format: str = "rsm",
    log_time: bool = True,
) -> str:
    """Parse, transform, and translate a manuscript, returning the output.

    Exactly one of *source* (manuscript text) or *path* (file to read) must
    be given.
    """
    app = ProcessorApp(
        srcpath=path,
        plain=source,
        handrails=handrails,
        loglevel=loglevel,
        log_format=log_format,
        log_time=log_time,
    )
    return app.run()
def lint(
    source: str = "",
    path: str = "",
    handrails: bool = False,
    loglevel: int = RSMApp.default_log_level,
    log_format: str = "rsm",
    log_time: bool = True,
):
    """Parse a manuscript and run the linter over it.

    NOTE(review): *handrails* is accepted for signature parity with
    :func:`render` but has no effect here.
    """
    app = LinterApp(
        srcpath=path,
        plain=source,
        loglevel=loglevel,
        log_format=log_format,
        log_time=log_time,
    )
    return app.run()
def make(
    source: str = "",
    path: str = "",
    handrails: bool = True,
    lint: bool = True,
    loglevel: int = RSMApp.default_log_level,
    log_format: str = "rsm",
    log_time: bool = True,
) -> str:
    """Run the full build: parse, transform, translate, build, and write.

    NOTE(review): *handrails* is accepted but not forwarded to
    :class:`FullBuildApp`, whose own default (True) always applies.
    """
    app = FullBuildApp(
        srcpath=path,
        plain=source,
        run_linter=lint,
        loglevel=loglevel,
        log_format=log_format,
        log_time=log_time,
    )
    return app.run()
import logging
import textwrap
from collections.abc import Iterable
from datetime import datetime
from pathlib import Path
from typing import (
Any,
Callable,
ClassVar,
Generator,
Optional,
Type,
TypeVar,
Union,
cast,
)
from icecream import ic
logger = logging.getLogger("RSM").getChild("nodes")
NodeSubType = TypeVar("NodeSubType", bound="Node")
class RSMNodeError(Exception):
    """Raised for invalid manuscript-tree operations (e.g. disallowed parents)."""

    pass
class Node:
"""A node in the manuscript tree.
A node represents a semantically meaningful element of the manuscript.
Parameters
----------
label
Unique identifier for this node.
types
Types of this node.
number
Node number.
nonum
Whether this node should be automatically given a number.
reftext_template
If not empty, replaces :attr:`classreftext`.
See Also
--------
:class:`Manuscript` : The class of the root node of a manuscript tree.
:class:`NodeWithChildren` : Subclass that implements methods to handle children.
Notes
-----
An instance of Node cannot have children. Only instances of (subclasses) of
:class:`NodeWithChildren` may have them. However, for the sake of having a uniform
API, Node implements the property :attr:`children`, which always returns an empty
tuple.
"""
classreftext: ClassVar[str] = "{nodeclass} {number}"
possible_parents: ClassVar[set[Type["NodeWithChildren"]]] = set()
"""Allowed types of parent Nodes.
When setting the parent of a Node, this attribute is checked to see whether the
intended parent has an admissible type.
Examples
--------
This is a class variable
>>> nodes.Item.possible_parents == {nodes.Itemize, nodes.Enumerate, nodes.Contents}
True
This variable is checked when setting the parent directly.
>>> it = nodes.Item()
>>> it.parent = nodes.Paragraph()
Traceback (most recent call last):
rsm.nodes.RSMNodeError: Node of type <class 'rsm.nodes.Item'> cannot have parent of type <class 'rsm.nodes.Paragraph'>
This is also used when setting the parent in some other indirect way, for example
when calling :meth:`~NodeWithChildren.append` on the desired parent.
>>> nodes.Paragraph().append(it)
Traceback (most recent call last):
rsm.nodes.RSMNodeError: Node of type <class 'rsm.nodes.Item'> cannot have parent of type <class 'rsm.nodes.Paragraph'>
Allowed parents proceed without raising.
>>> nodes.Itemize().append(it)
Itemize(parent=None, [Item])
"""
autonumber: ClassVar[bool] = False
"""Whether to automatically assign a number to this node during transform step.
Examples
--------
>>> msc, thm1, thm2 = nodes.Manuscript(), nodes.Theorem(), nodes.Theorem()
>>> (thm1.number, thm2.number) == (None, None)
True
>>> tform = rsm.transformer.Transformer()
>>> tform.transform(msc.append([thm1, thm2])) # doctest: +IGNORE_RESULT
>>> thm1.number, thm2.number
(1, 2)
"""
_number_within: ClassVar[Optional[Type["Node"]]] = None
# see property number_within for documentation
_number_as: ClassVar[Optional[Type["Node"]]] = None
# see property number_as for documentation
newmetakeys: ClassVar[set] = {"label", "types", "nonum", "reftext"}
"""Meta keys to add to those of the parent class.
.. important::
Only use this when defining a new Node subclass. When dealing with Node
isntances, do not access this attribute directly neither for reading nor writing.
Always use :meth:`metakeys` in that case.
See Also
--------
:meth:`metakeys`
Examples
--------
The keys in `newmetakeys` are added to the meta keys of the parent class.
>>> nodes.Heading.newmetakeys
{'title'}
>>> nodes.Heading.metakeys() == nodes.Node.metakeys() | {"title"}
True
The intended use, and only supported use, of `newmetakeys` is at the time of class
definition.
>>> class NewNode(nodes.Node):
... newmetakeys = {"newkey"}
>>> NewNode.metakeys() == nodes.Node.metakeys() | {"newkey"}
True
"""
    def __init__(
        self,
        label: str = "",
        types: Optional[list[str]] = None,
        number: Optional[int] = None,
        nonum: bool = False,
        reftext_template: str = "",
    ) -> None:
        self.label: str = label
        """Unique identifier."""
        self.types: list[str] = types or []
        """Types of this node."""
        # Annotation fixed: the number is None until autonumbering assigns one.
        self.number: Optional[int] = number
        """Node number."""
        self.nonum: bool = nonum
        """Whether this node should be automatically given a number."""
        self.reftext_template: str = reftext_template or self.classreftext
        """Reftext template, or "" to use :attr:`classreftext`."""
        # Set through the `parent` property, which validates possible_parents.
        self._parent: Optional["NodeWithChildren"] = None
def _attrs_for_repr_and_eq(self) -> list[str]:
return ["label", "types", "nonum", "number", "parent"]
def __repr__(self) -> str:
cls = self.__class__.__name__
d = {
att: getattr(self, att)
for att in self._attrs_for_repr_and_eq()
if att != "children"
}
d["parent"] = (
"None" if self.parent is None else f"{self.parent.__class__.__name__}"
)
d_str = ", ".join(f"{k}={v}" for k, v in d.items() if v)
return f"{cls}({d_str})"
def __eq__(self, other: Any) -> bool:
attrs = self._attrs_for_repr_and_eq()
try:
return all(
(mine is getattr(other, a))
if isinstance(mine := getattr(self, a), Node)
else (mine == getattr(other, a))
for a in attrs
)
except AttributeError:
return False
def sexp(
self,
tab_width: int = 2,
meta: bool = False,
ignore_meta_keys: Optional[set] = None,
) -> str:
"""Represent this node as an S expression.
Parameters
----------
tab_width
How many spaces of indentation at each depth level.
meta
Whether to include meta in the output.
ignore_meta_keys
When `meta` is ``True``, this controls which meta keys to include.
Returns
-------
A string representation of the Node and its descendents.
See Also
--------
:meth:`traverse`
Examples
--------
>>> p1, p2, p3, p4 = [nodes.Paragraph().append(nodes.Text()) for _ in range(4)]
>>> msc = nodes.Manuscript().append(
... [
... nodes.Section().append(p1),
... nodes.Section().append([nodes.Subsection().append([p2, p3])]),
... nodes.Section().append(p4),
... ]
... )
By default, meta keys are not included in the output.
>>> print(msc.sexp())
(Manuscript
(Section
(Paragraph
(Text)))
(Section
(Subsection
(Paragraph
(Text))
(Paragraph
(Text))))
(Section
(Paragraph
(Text))))
:meth:`sexp` is useful when transforming the tree.
>>> p3.replace_self(nodes.Paragraph().append(nodes.Span(strong=True).append(nodes.Text())))
>>> print(msc.sexp())
(Manuscript
(Section
(Paragraph
(Text)))
(Section
(Subsection
(Paragraph
(Text))
(Paragraph
(Span
(Text)))))
(Section
(Paragraph
(Text))))
Output meta for even more debugging information.
>>> print(msc.sexp(meta=True))
(Manuscript { :reftext: Manuscript }
(Section { :reftext: Section }
(Paragraph { :reftext: Paragraph }
(Text { :reftext: Text })))
(Section { :reftext: Section }
(Subsection { :reftext: Section }
(Paragraph { :reftext: Paragraph }
(Text { :reftext: Text }))
(Paragraph { :reftext: Paragraph }
(Span { :reftext: Span , :strong: True }
(Text { :reftext: Text })))))
(Section { :reftext: Section }
(Paragraph { :reftext: Paragraph }
(Text { :reftext: Text }))))
Use `ignore_meta_keys` for a less verbose output.
>>> print(msc.sexp(meta=True, ignore_meta_keys={"reftext"}))
(Manuscript { }
(Section { }
(Paragraph { }
(Text { })))
(Section { }
(Subsection { }
(Paragraph { }
(Text { }))
(Paragraph { }
(Span { :strong: True }
(Text { })))))
(Section { }
(Paragraph { }
(Text { }))))
"""
ignore_meta_keys = set() if ignore_meta_keys is None else set(ignore_meta_keys)
exp = ""
stack = [(0, self)]
while stack:
indent, node = stack.pop()
if node is None:
exp += ")"
continue
spaces = " " * indent
exp += f"\n{spaces}({node.__class__.__name__}"
if meta:
meta_str = (
"{ "
+ ", ".join(
[
f":{key}: {val}"
for key in sorted(node.metakeys())
if key not in ignore_meta_keys
and (val := getattr(node, key))
]
)
+ " }"
)
exp += f" {meta_str}"
stack.append((None, None))
if node.children:
stack += [(indent + tab_width, c) for c in reversed(node.children)]
return exp[1:] # get rid of an extra newline at the start
@classmethod
def metakeys(cls: Type["Node"]) -> set[str]:
"""The valid meta keys of the given class.
Returns
-------
The valid meta keys.
Examples
--------
>>> all_nodes_meta = {"label", "types", "nonum", "reftext"}
>>> nodes.Node.metakeys() == all_nodes_meta
True
>>> nodes.Span.metakeys() - all_nodes_meta == {"strong", "emphas", "little", "insert", "delete"}
True
>>> nodes.Author.metakeys() - all_nodes_meta == {"name", "affiliation", "email"}
True
"""
return cls.newmetakeys.union(
*[b.metakeys() for b in cls.__bases__ if hasattr(b, "metakeys")]
)
@property
def parent(self) -> Optional["NodeWithChildren"]:
"""The parent of this Node, or None."""
return self._parent
@parent.setter
def parent(self, node: Optional["NodeWithChildren"]) -> None:
if node is None:
self._parent = None
elif not self.__class__.possible_parents:
self._parent = node
else:
possible_parents = self.__class__.possible_parents
if possible_parents and type(node) not in possible_parents:
raise RSMNodeError(
f"Node of type {type(self)} cannot have parent of type {type(node)}"
)
self._parent = node
@property
def children(self) -> tuple:
"""Tuple with this Node's children."""
return tuple() # necessary for methods such as Nodes.traverse
@property
def number_within(self) -> Type["Node"]:
return self.__class__._number_within or Manuscript
@property
def number_as(self) -> Type["Node"]:
return self._number_as or self.__class__
    @property
    def full_number(self) -> Optional[str]:
        """Dotted number including ancestors (e.g. "2.1"), or None if nonum.

        Return annotation fixed: unnumbered nodes yield None, not a string.
        """
        if self.nonum:
            return None
        ancestor = self.first_ancestor_of_type(self.number_within)
        if not ancestor:
            logger.warning(
                f"{self.__class__.__name__} numbered within "
                f"{self.number_within.__name__} but no such ancestor was found; "
                "using root node instead"
            )
            ancestor = self.first_ancestor_of_type(Manuscript)
        if ancestor and ancestor.full_number:
            return f"{ancestor.full_number}.{self.number}"
        return f"{self.number}" if self.number else ""
@property
def reftext(self) -> str:
return self.reftext_template.format(
nodeclass=self.__class__.__name__, number=self.full_number
)
def traverse(
self,
*,
condition: Optional[Callable[["Node"], bool]] = None,
nodeclass: Optional[NodeSubType] = None,
) -> Generator[NodeSubType, None, None]:
"""Generate the descendents of this Node in depth-first order.
By default, yield this node and then every descendent in depth-first order. If
`condition` is given, yield only those Nodes that satisfy the condition. If
`nodeclass` is given, it overrides `condition` and only those descendents of the
specified type are yielded.
Parameters
----------
condition
Callable that receives a single argument, a descendent Node, and returns
whether it should be yielded.
nodeclass
A Node subclass of the desired yielded descendent Nodes. If not None,
`condition` is ignored.
Yields
-------
:class:`Node`
See Also
--------
:meth:`sexp`
Notes
-----
Passing ``nodeclass=<NodeSubType>`` is equivalent to passing ``condition=lambda
n: isinstance(n, <NodeSubType>)``.
Examples
--------
>>> p1, p2, p3, p4 = [nodes.Paragraph().append(nodes.Text()) for _ in range(4)]
>>> msc = nodes.Manuscript().append(
... [
... nodes.Section().append(p1),
... nodes.Section().append([nodes.Subsection().append([p2, p3])]),
... nodes.Section().append(p4),
... ]
... )
Visit every descendent, including self.
>>> for n in msc.traverse(): print(n)
Manuscript(parent=None, [Section, Section, Section])
Section(parent=Manuscript, [Paragraph])
Paragraph(parent=Section, [Text])
Text("")
Section(parent=Manuscript, [Subsection])
Subsection(parent=Section, [Paragraph, Paragraph])
Paragraph(parent=Subsection, [Text])
Text("")
Paragraph(parent=Subsection, [Text])
Text("")
Section(parent=Manuscript, [Paragraph])
Paragraph(parent=Section, [Text])
Text("")
Use `nodeclass` to yield only nodes of a specified type. Note that subclasses
are also yielded.
>>> for n in msc.traverse(nodeclass=nodes.Section): print(n)
Section(parent=Manuscript, [Paragraph])
Section(parent=Manuscript, [Subsection])
Subsection(parent=Section, [Paragraph, Paragraph])
Section(parent=Manuscript, [Paragraph])
Yield only nodes satisfying an arbitrary condition
>>> msc.children[1].nonum = True
>>> for n in msc.traverse(condition=lambda n: n.nonum): print(n)
Section(nonum=True, parent=Manuscript, [Subsection])
"""
if condition is None:
condition = lambda n: True
if nodeclass is not None:
if issubclass(nodeclass, Node):
condition = lambda n: isinstance(n, nodeclass)
else:
raise RSMNodeError("nodeclass must inherit from Node")
stack = [self]
while stack:
node = stack.pop()
if condition(node):
yield cast(NodeSubType, node)
stack += node.children[::-1]
def first_of_type(
self, cls: Union[Type["Node"], tuple[Type["Node"]]], return_idx: bool = False
) -> Union[Optional["Node"], tuple[Optional["Node"], Optional[int]]]:
"""First child of the specified type.
Parameters
----------
cls
Desired class of the child.
return_idx
Whether to return the index of the child among this node's children.
Returns
-------
Node
The first child of the specified type, or None.
Node, int
If `return_idx` is True, return (child, index), or (None, None).
See Also
--------
:meth:`last_of_type`
:meth:`first_ancestor_of_type`
Examples
--------
>>> p = nodes.Paragraph().append([nodes.Text("one"), nodes.Text("two")])
>>> p.first_of_type(nodes.Text)
Text("one")
>>> p.first_of_type(nodes.Text, return_idx=True)
(Text("one"), 0)
The index counts all existing children.
>>> p.prepend(nodes.Span())
Paragraph(parent=None, [Span, Text, Text])
>>> p.first_of_type(nodes.Text, return_idx=True)
(Text("one"), 1)
"""
for idx, child in enumerate(self.children):
if isinstance(child, cls):
return (child, idx) if return_idx else child
return (None, None) if return_idx else None
def last_of_type(
self, cls: Union[Type["Node"], tuple[Type["Node"]]], return_idx: bool = False
) -> Union[Optional["Node"], tuple[Optional["Node"], Optional[int]]]:
"""Last child of the specified type.
For details, see :meth:`first_of_type`.
Examples
--------
>>> p = nodes.Paragraph().append([nodes.Span(), nodes.Text("one"), nodes.Text("two")])
>>> p.last_of_type(nodes.Text, return_idx=True)
(Text("two"), 2)
"""
for idx, child in enumerate(reversed(self.children)):
if isinstance(child, cls):
return (child, len(self.children) - idx - 1) if return_idx else child
def prev_sibling(
self, cls: Union[Type["Node"], str, None] = None
) -> Optional["Node"]:
"""The previous sibling, optionally of a specified type.
Parameters
----------
cls
The type of the desired sibling. If ``"self"``, search for the previous
sibling with the same type as this node. If ``None``, return the
immediately preceding sibling, regardless of its type.
Returns
-------
The desired sibling, or None.
See Also
--------
:meth:`first_ancestor_of_type`
:meth:`next_sibling`
Examples
--------
>>> p, s, t1, t2 = nodes.Paragraph(), nodes.Span(), nodes.Text("one"), nodes.Text("two")
>>> p.append([t1, s, t2]) # doctest: +IGNORE_RESULT
>>> t2.prev_sibling()
Span(parent=Paragraph)
>>> t2.prev_sibling(nodes.Text)
Text("one")
>>> t1.prev_sibling() is None
True
Use ``"self"`` to find nodes of the same type.
>>> s2 = nodes.Span()
>>> p.append(s2)
Paragraph(parent=None, [Text, Span, Text, Span])
>>> s2.prev_sibling() is t2
True
>>> s2.prev_sibling("self") is s
True
"""
if self.parent is None:
return None
ids = [id(c) for c in self.parent.children]
index = ids.index(id(self))
if cls is None and index:
return self.parent.children[index - 1]
if cls == "self":
cls = self.__class__
cls = cast(Type["Node"], cls)
prev_sibs = self.parent.children[:index]
for node in reversed(prev_sibs):
if isinstance(node, cls):
return node
return None
    def next_sibling(self, cls: Union[Type["Node"], str, None] = None) -> Optional["Node"]:
        """The next sibling, optionally of a specified type.

        For details, see :meth:`prev_sibling`.  Annotation fixed: like
        :meth:`prev_sibling`, this also accepts the string ``"self"``.

        See Also
        --------
        :meth:`prev_sibling`
        """
        if self.parent is None:
            return None
        # Compare by identity: children may compare equal (__eq__) yet be distinct.
        ids = [id(c) for c in self.parent.children]
        index = ids.index(id(self))
        if cls is None and index < len(self.parent.children) - 1:
            return self.parent.children[index + 1]
        if cls == "self":
            cls = self.__class__
        cls = cast(Type["Node"], cls)
        next_sibs = self.parent.children[index + 1 :]
        for node in next_sibs:
            if isinstance(node, cls):
                return node
        return None
def first_ancestor_of_type(
self, cls: Union[Type["Node"], tuple[Type["Node"]]]
) -> Optional["Node"]:
"""First ancestor of the specified type.
Parameters
----------
cls
Desired class of the ancestor.
Returns
-------
The first ancestor of the specified type, or None.
See Also
--------
:attr:`parent`
:meth:`first_of_type`
Examples
--------
Given the tree
>>> t = nodes.Text("Hello")
>>> p = nodes.Paragraph().append(nodes.Span().append(t))
>>> print(p.sexp())
(Paragraph
(Span
(Text)))
Find an ancestor of a desired type.
>>> t.first_ancestor_of_type(nodes.Paragraph)
Paragraph(parent=None, [Span])
Always check the return value against ``None``.
>>> t.first_ancestor_of_type(nodes.Manuscript) is None
True
"""
ancestor = self.parent
# We use `type is not cls` instead of the recommended `isinstance()` because we
# are looking for an exact type, not a subtype. For example, we may want to
# find the enclosing Section of a Theorem, bypassing any Subsections that may
# lie in between.
while ancestor and (
all(type(ancestor) is not c for c in cls)
if isinstance(cls, tuple)
else (type(ancestor) is not cls)
):
ancestor = ancestor.parent
return ancestor # the root node has parent None
def replace_self(self, replacement: Union["Node", Iterable["Node"]]) -> None:
"""Replace this node in its parent's children.
This is mostly useful during the transform step.
Parameters
----------
replacement
The Node or Nodes to replace this with.
Raises
------
RSMNodeError
If this Node's parent is None.
See Also
--------
:meth:`remove_self`
Examples
--------
Wrap a Text in a strong Span to render it in bold face.
>>> p, t = nodes.Paragraph(), nodes.Text("one")
>>> p.append(t) # doctest: +IGNORE_RESULT
>>> print(p.sexp())
(Paragraph
(Text))
>>> s = nodes.Span(strong=True)
>>> t.replace_self(s)
>>> s.append(t) # doctest: +IGNORE_RESULT
>>> print(p.sexp())
(Paragraph
(Span
(Text)))
Note the call to :meth:`replace_self` must happen *before* the Text is added to
the Span.
May also replace with a list of Nodes.
>>> t.replace_self([nodes.Text("new one"), nodes.Text("two")])
>>> print(p.sexp())
(Paragraph
(Span
(Text)
(Text)))
The following recipe uses the above example to wrap every Text within ``root``
in a strong Span. Note this is done to each Text descendent of ``root``,
regardless of depth, and without any reference to their original immediate
parents.
>>> root = nodes.Manuscript().append(...) # doctest: +SKIP
>>> for t in root.traverse(nodeclass=Text): # doctest: +SKIP
... s = nodes.Span(strong=True)
... t.replace_self(s)
... s.append(t)
"""
if not self.parent:
raise RSMNodeError("Can only call replace_self on a node with parent")
ids = [id(c) for c in self.parent.children]
index = ids.index(id(self))
parent = self.parent
self.parent.remove(self)
if not isinstance(replacement, Node):
for idx, rep in enumerate(replacement):
parent._children.insert(index + idx, rep)
rep.parent = parent
else:
parent._children.insert(index, replacement)
replacement.parent = parent
def remove_self(self) -> None:
"""Remove this Node from its parent's children.
See Also
--------
:meth:`replace_self`
Examples
--------
>>> t = nodes.Text("remove me")
>>> p = nodes.Paragraph().append(t)
>>> p.children
(Text("remove me"),)
>>> t.remove_self()
>>> p.children
()
"""
if self.parent is not None:
self.parent.remove(self)
self.parent = None
def ingest_dict_as_meta(self, meta: dict) -> None:
if "reftext" in meta:
meta["reftext_template"] = meta["reftext"]
del meta["reftext"]
for key, value in meta.items():
setattr(self, str(key), value)
class NodeWithChildren(Node):
    """A :class:`Node` that may have children Nodes."""

    def __init__(self, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self._children: list[Node] = []

    def _attrs_for_repr_and_eq(self) -> list[str]:
        return super()._attrs_for_repr_and_eq() + ["children"]

    def __repr__(self) -> str:
        if not self._children:
            return super().__repr__()
        children_repr = ", ".join(f"{c.__class__.__name__}" for c in self._children)
        children_repr = "[" + children_repr + "]"
        ret = super().__repr__()
        return ret[:-1] + ", " + children_repr + ")"

    @property
    def children(self) -> tuple[Node, ...]:
        """Tuple with this Node's children."""
        return tuple(self._children)

    def clear(self) -> None:
        """Remove all children."""
        for c in self._children:
            c.parent = None
        self._children = []

    def append(self, child: Union[Node, Iterable[Node]]) -> "NodeWithChildren":
        """Add a child or children after all current children.

        Parameters
        ----------
        child
            Node or iterable of nodes to append.

        Returns
        -------
        self (so calls can be chained).

        Raises
        ------
        RSMNodeError
            When attempting to add as a child a node that already has a
            parent (including appending the same child twice).
        TypeError
            When *child* is neither a Node nor an iterable of Nodes.

        Examples
        --------
        >>> p, t1, t2 = nodes.Paragraph(), nodes.Text("one"), nodes.Text("two")
        >>> p.append(t1).append([t2])  # doctest: +IGNORE_RESULT
        >>> p.children
        (Text("one"), Text("two"))
        """
        if isinstance(child, Node):
            if child.parent:
                raise RSMNodeError("Attempting to append child to a new parent")
            self._children.append(child)
            child.parent = self
        # Exclude str explicitly: a string is Iterable and would otherwise
        # recurse forever instead of raising a clean TypeError.
        elif isinstance(child, Iterable) and not isinstance(child, str):
            for c in child:
                self.append(c)
        else:
            raise TypeError("Can only append a Node or iterable of Nodes as children")
        return self

    def prepend(self, child: Union[Node, Iterable[Node]]) -> "NodeWithChildren":
        """Add a child or children before all current children.

        For details, see :meth:`append`.  Fixed to accept any iterable of
        Nodes (previously only a list), and the return annotation now
        reflects that self is returned, as with :meth:`append`.

        NOTE(review): unlike append, re-prepending a child whose parent is
        already self does not raise -- preserved as-is; confirm intent.

        See Also
        --------
        :meth:`append`
        """
        if isinstance(child, Node):
            if child.parent and child.parent is not self:
                raise RSMNodeError("Attempting to prepend child to a different parent")
            self._children.insert(0, child)
            child.parent = self
        elif isinstance(child, Iterable) and not isinstance(child, str):
            # Prepend in reverse so the iterable's order is preserved.
            for c in reversed(list(child)):
                self.prepend(c)
        else:
            raise TypeError("Can only prepend a Node or iterable of Nodes as children")
        return self

    def remove(self, child: "Node") -> None:
        """Remove *child*, matching by identity rather than equality."""
        ids = [id(c) for c in self._children]
        index = ids.index(id(child))
        del self._children[index]
        child.parent = None
class Text(Node):
    """Plain text node. Cannot contain children."""

    def __init__(self, text: str = "", asis: bool = False, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self.text = text  # raw text content
        self.asis = asis  # presumably "as is", i.e. skip further processing -- confirm

    def __repr__(self) -> str:
        shortened = textwrap.shorten(self.text, 60)
        return f'{type(self).__name__}("{shortened}")'
class Error(Text):
"""Error node.
Notes
-----
When the parser encounters an error, this node is created at the location where the
error is found. Note this inherits from :class:`Text`; the text contents of this
Node should indicate where in the source file the error occurred.
"""
class Span(NodeWithChildren):
    """Inline span of content carrying text decorations (bold, emphasis, ...)."""

    newmetakeys: ClassVar[set] = {"strong", "emphas", "little", "insert", "delete"}
    # Maps each decoration attribute to a tag name ("strong", "em", "small",
    # "ins", "del") -- presumably the HTML tag used at render time; confirm
    # against the translator.
    attr_to_tag: ClassVar[dict] = {
        "strong": "strong",
        "emphas": "em",
        "little": "small",
        "insert": "ins",
        "delete": "del",
    }

    def __init__(
        self,
        strong: bool = False,
        emphas: bool = False,
        little: bool = False,
        insert: bool = False,
        delete: bool = False,
        **kwargs: Any,
    ) -> None:
        super().__init__(**kwargs)
        self.strong = strong
        self.emphas = emphas
        self.little = little
        self.insert = insert
        self.delete = delete
class Heading(NodeWithChildren):
    """A node that carries a title (base of sections and the manuscript root)."""

    newmetakeys: ClassVar[set] = {"title"}

    def __init__(self, title: str = "", **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self.title = title  # display title of this heading
class Manuscript(Heading):
    """Root node of a manuscript tree."""

    newmetakeys: ClassVar[set] = {"date"}
    nonum = True  # the root never receives a number

    def __init__(
        self, src: str = "", date: Optional[datetime] = None, **kwargs: Any
    ) -> None:
        super().__init__(**kwargs)
        self.src = src  # presumably the original source text -- confirm with caller
        self.date = date

    @property
    def full_number(self) -> str:
        # The root contributes nothing to descendants' dotted numbers.
        return ""
class Author(Node):
    """An author of the manuscript.
    Notes
    -----
    A Manuscript may have more than one Author node.
    Examples
    --------
    .. rsm::
       :manuscript:
       :author:
         :name: Melvin J. Noir
         :affiliation: ACME University
         :email: mel@acme.edu
       ::
       ::
    """
    # Meta keys settable from markup; empty strings mean "not provided".
    newmetakeys: ClassVar[set] = {"name", "affiliation", "email"}
    def __init__(
        self, name: str = "", affiliation: str = "", email: str = "", **kwargs: Any
    ) -> None:
        super().__init__(**kwargs)
        self.name: str = name
        """Full name of the author."""
        self.affiliation: str = affiliation
        """Institutional affiliation."""
        self.email: str = email
        """Contact information."""
class Abstract(NodeWithChildren):
    """Manuscript abstract.

    Holds the abstract's content plus the manuscript's keywords and MSC codes.
    By convention (not enforced here, though the translation step may assume
    it) abstracts contain only paragraphs and no other blocks.

    Examples
    --------
    .. rsm::
       :manuscript:
       :abstract:
         :keywords: {cosmology, general relativity, black holes}
       Black holes emit radiation.
       ::
       ::
    """

    newmetakeys: ClassVar[set] = {"keywords", "msc"}

    def __init__(
        self,
        keywords: Optional[list[str]] = None,
        msc: Optional[list[str]] = None,
        **kwargs: Any,
    ) -> None:
        super().__init__(**kwargs)
        # Fall back to fresh empty lists so instances never share state.
        self.keywords = keywords or []
        """Manuscript keywords."""
        self.msc = msc or []
        """Mathematics Subject Classification (MSC) codes."""
class Section(Heading):
    """Top-level section; numbered automatically."""
    autonumber = True
    level: ClassVar[int] = 2
class Subsection(Section):
    # Numbered within its parent Section (e.g. 2.1).
    _number_within = Section
    level: ClassVar[int] = 3
    classreftext: ClassVar[str] = "Section {number}"
class Subsubsection(Section):
    _number_within = Subsection
    level: ClassVar[int] = 4
    classreftext: ClassVar[str] = "Section {number}"
class BaseParagraph(Heading):
    """Base class for paragraph-like blocks (Paragraph, Note, Item)."""
class Paragraph(BaseParagraph):
    pass
class Note(BaseParagraph):
    pass
class Enumerate(NodeWithChildren):
    # Ordered list container.
    pass
class Itemize(NodeWithChildren):
    # Unordered list container.
    pass
class Keyword(Span):
    # An inline Span used to mark keywords.
    pass
class Construct(NodeWithChildren):
    """A node tagged with a logical-construct ``kind`` (let, prove, qed, ...)."""
    # Maps each supported kind to the keyword text used for it.
    kind_to_keyword: dict[str, str] = {
        "let": "LET",
        "define": "DEFINE",
        "write": "WRITE",
        "case": "CASE",
        "then": "THEN",
        "new": "NEW",
        "assume": "ASSUME",
        "prove": "PROVE",
        "claim": "⊢",
        "claimblock": "⊢",
        "qed": "QED",
    }
    def __init__(self, kind: str = "", **kwargs: Any):
        super().__init__(**kwargs)
        self.kind = kind
    @property
    def keyword(self) -> str:
        # Raises KeyError if ``kind`` is not one of the keys above.
        return self.kind_to_keyword[self.kind]
class ClaimBlock(Construct):
    """A Construct whose kind is fixed to "claimblock"."""
    def __init__(self, **kwargs: Any):
        super().__init__(kind="claimblock", **kwargs)
class Math(NodeWithChildren):
    # Inline mathematics (contrast with MathBlock below).
    pass
class Code(NodeWithChildren):
    # Inline code (contrast with CodeBlock below).
    pass
class MathBlock(NodeWithChildren):
    """Display-math block; numbered within its Section."""
    autonumber = True
    _number_within = Section
    classreftext: ClassVar[str] = "({number})"
    newmetakeys: ClassVar[set] = {"isclaim"}
    def __init__(self, isclaim: bool = False, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        # Whether this equation is itself a claim.
        self.isclaim = isclaim
class CodeBlock(NodeWithChildren):
    """Display code listing."""
    classreftext: ClassVar[str] = "Code Listing {number}"
class Algorithm(NodeWithChildren):
    r"""Algorithm pseudocode.
    Notes
    -----
    The contents of an Algorithm node are not RSM markup. They must be written in
    LaTeX-style notation using the `algorithmic` [1]_ environment.
    RSM uses `pseudocode.js` [2]_ to render algorithms in the frontend.
    References
    ----------
    .. [1] https://www.overleaf.com/learn/latex/Algorithms#The_algpseudocode_and_algorithm_packages
    .. [2] https://saswat.padhi.me/pseudocode.js/
    Examples
    --------
    .. rsm::
       :manuscript:
       :algorithm:
       \begin{algorithm}
       \caption{Quicksort}
       \begin{algorithmic}
       \PROCEDURE{Quicksort}{$A, p, r$}
           \IF{$p < r$}
               \STATE $q = $ \CALL{Partition}{$A, p, r$}
               \STATE \CALL{Quicksort}{$A, p, q - 1$}
               \STATE \CALL{Quicksort}{$A, q + 1, r$}
           \ENDIF
       \ENDPROCEDURE
       \PROCEDURE{Partition}{$A, p, r$}
           \STATE $x = A[r]$
           \STATE $i = p - 1$
           \FOR{$j = p$ \TO $r - 1$}
               \IF{$A[j] < x$}
                   \STATE $i = i + 1$
                   \STATE exchange
                   $A[i]$ with $A[j]$
               \ENDIF
               \STATE exchange $A[i]$ with $A[r]$
           \ENDFOR
       \ENDPROCEDURE
       \end{algorithmic}
       \end{algorithm}
       ::
       ::
    """
    # Algorithms get their own running number.
    autonumber = True
class Appendix(Node):
    """Mark the start of the Appendix sections.
    The Appendix node currently has no visible output in the manuscript. Instead, it
    affects how subsequent sections are numbered.
    Notes
    -----
    The Appendix must not contain children. In fact, its parent subclass is
    :class:`Node`, not :class:`NodeWithChildren`. Sections "in" the Appendix should
    simply appear after an Appendix node.
    Examples
    --------
    In RSM markup, the Appendix is a stamp, i.e. it does not need a closing Halmos.
    .. code-block:: text
       :manuscript:
       # Before Appendix
       ::
       # Before Appendix
       ## Subsec
       ::
       ::
       :appendix:
       # After Appendix
       ::
       ::
    The above source is parsed into a Manuscript tree equivalent to the following.
    >>> msc = nodes.Manuscript().append(
    ...     [
    ...         nodes.Section(title="Before Appendix"),
    ...         nodes.Section(title="Before Appendix").append(nodes.Subsection(title="Subsec")),
    ...         nodes.Appendix(),
    ...         nodes.Section(title="After Appendix"),
    ...     ]
    ... )
    >>> print(msc.sexp())
    (Manuscript
      (Section)
      (Section
        (Subsection))
      (Appendix)
      (Section))
    Run the transform step on this Manuscript so the Sections will be autonumbered.
    >>> tform = rsm.transformer.Transformer()
    >>> tform.transform(msc)
    Manuscript(parent=None, [Section, Section, Appendix, Section])
    By default, Sections appearing after the Appendix receive letter numerals.
    >>> for sec in msc.traverse(nodeclass=nodes.Section):
    ...     print(f'{sec.full_number}. {sec.title}')
    1. Before Appendix
    2. Before Appendix
    2.1. Subsec
    A. After Appendix
    """
    # No behavior of its own: this node is only a marker consumed elsewhere.
class BaseReference(Node):
    """Common machinery for nodes that point at another node or label."""

    def __init__(
        self,
        target: Union[str, Node, None] = None,
        overwrite_reftext: str = "",
        **kwargs: Any,
    ) -> None:
        super().__init__(**kwargs)
        # Optional replacement for the default reference text.
        self.overwrite_reftext = overwrite_reftext
        self.target = target

    def _attrs_for_repr_and_eq(self) -> list[str]:
        extra_attrs = ["target", "overwrite_reftext"]
        return super()._attrs_for_repr_and_eq() + extra_attrs
class PendingReference(BaseReference):
    """A reference whose target is still a string label (not yet resolved)."""
    def __init__(self, target: str = "", **kwargs: Any) -> None:
        super().__init__(target, **kwargs)
class Reference(BaseReference):
    """A reference whose target has been resolved to an actual Node."""
    def __init__(self, target: Optional[Node] = None, **kwargs: Any) -> None:
        super().__init__(target, **kwargs)
class PendingPrev(BaseReference):
    def __init__(self, target: str = "", **kwargs: Any) -> None:
        super().__init__(target, **kwargs)
class URL(BaseReference):
    # ``target`` holds the URL string itself.
    def __init__(self, target: str = "", **kwargs: Any) -> None:
        super().__init__(target, **kwargs)
class PendingCite(Node):
    """A citation whose targets are still string labels (not yet resolved)."""
    def __init__(self, targetlabels: Optional[list[str]] = None, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self.targetlabels = targetlabels or []
class Cite(Node):
    """A citation whose targets have been resolved to Nodes."""
    def __init__(self, targets: Optional[list[Node]] = None, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self.targets = targets or []
class Statement(NodeWithChildren):
    pass
class Proof(NodeWithChildren):
    pass
class Subproof(NodeWithChildren):  # importantly, NOT a subclass of Proof!
    pass
class Sketch(NodeWithChildren):
    # A sketch may only appear directly inside a Proof.
    possible_parents: ClassVar[set[Type["NodeWithChildren"]]] = {Proof}
class Step(Paragraph):
    autonumber = True
    possible_parents: ClassVar[set[Type["NodeWithChildren"]]] = {Proof, Subproof}
# Steps may also nest inside other Steps; these two lines run after the class
# body because the Step class object must exist before it can reference itself.
Step.possible_parents.add(Step)
Step._number_within = (Step, Proof)
class Theorem(Heading):
    """A numbered theorem-like block with optional title, goals, and marks."""

    autonumber = True
    title = ""
    _number_within = Section
    newmetakeys: ClassVar[set] = {"title", "goals", "stars", "clocks"}

    def __init__(
        self,
        title: str = "",
        goals: Optional[list[BaseReference]] = None,
        stars: int = 0,
        clocks: int = 0,
        **kwargs: Any,
    ):
        # BUG FIX: the original called super().__init__(*kwargs), which unpacks
        # the keyword-argument dict as *positional* arguments (its keys).
        # Forward them as keyword arguments, as every sibling class does.
        super().__init__(**kwargs)
        self.title = title
        # Avoid a shared mutable default: each instance gets its own list.
        self.goals = goals or []
        self.stars = stars
        self.clocks = clocks
class Lemma(Theorem):
    # Shares Theorem's number sequence.
    _number_as = Theorem
class Proposition(Theorem):
    _number_as = Theorem
class Remark(Theorem):
    _number_as = Theorem
class Definition(Theorem):
    _number_as = Theorem
class Bibliography(NodeWithChildren):
    # Container for Bibitem children.
    pass
class Bibitem(Node):
    """A single bibliography entry."""
    autonumber = True
    classreftext: ClassVar[str] = "{number}"
    # Bibliographic fields that may be set from markup.
    newmetakeys: ClassVar[set] = {
        "kind",
        "author",
        "title",
        "year",
        "journal",
        "volume",
        "number",
        "publisher",
        "doi",
    }
    def __init__(
        self,
        kind: str = "",
        author: str = "",
        title: str = "",
        year: int = -1,
        journal: str = "",
        volume: int = -1,
        number: int = -1,
        publisher: str = "",
        doi: str = "",
        **kwargs: Any,
    ):
        super().__init__(**kwargs)
        self.kind = kind
        self.author = author
        self.title = title
        # -1 means "not provided" for the integer fields.
        self.year = year
        self.journal = journal
        self.volume = volume
        self.number = number
        self.publisher = publisher
        self.doi = doi
        # Nodes that cite this entry; presumably populated during the
        # transform step — confirm against the transformer.
        self.backlinks = []
class UnknownBibitem(Bibitem):
    """A Bibitem whose number could not be determined (defaults to "?")."""
    def __init__(self, number: Union[str, int] = "?", **kwargs: Any) -> None:
        super().__init__(**kwargs)
        # Overrides the autonumber-assigned number attribute.
        self.number = number
class Figure(NodeWithChildren):
    """A figure loaded from ``path`` and displayed scaled by ``scale``."""
    autonumber = True
    _number_within = Section
    newmetakeys: ClassVar[set] = {"path", "scale"}
    def __init__(
        self, path: Union[Path, str] = "", scale: float = 1.0, **kwargs: Any
    ) -> None:
        super().__init__(**kwargs)
        # Accept either str or Path; always store a Path.
        self.path = Path(path)
        self.scale = scale
class Draft(NodeWithChildren):
    pass
class Table(NodeWithChildren):
    autonumber = True
class TableHead(NodeWithChildren):
    pass
class TableBody(NodeWithChildren):
    pass
class TableRow(NodeWithChildren):
    pass
class TableDatum(NodeWithChildren):
    pass
class Caption(Paragraph):
    # A caption may only appear inside a Figure or a Table.
    possible_parents: ClassVar[set[Type["NodeWithChildren"]]] = {Figure, Table}
class Contents(Itemize):
    # Table of contents; an Itemize by inheritance.
    pass
class Item(BaseParagraph):
    # List items are valid only inside list-like containers.
    possible_parents: ClassVar[set[Type["NodeWithChildren"]]] = {
        Itemize,
        Enumerate,
        Contents,
    }
import sys
from argparse import ArgumentParser, Namespace
from importlib.metadata import version
from typing import Callable, Optional
import livereload
from rsm import app
from rsm.tsparser import RSMParserError
def init_parser() -> ArgumentParser:
    """Build the command line parser shared by the rsm entry points."""
    parser = ArgumentParser()
    # Each entry is (names/flags, keyword arguments for add_argument);
    # registration order is preserved so --help output is unchanged.
    argument_spec = [
        (["src"], {"help": "RSM source path"}),
        (
            ["-c", "--string"],
            {"help": "interpret src as a source string, not a path",
             "action": "store_true"},
        ),
        (["-r", "--handrails"], {"help": "output handrails", "action": "store_true"}),
        (
            ["-s", "--suppress-output"],
            {"help": "do not show output, only the logs", "action": "store_true"},
        ),
        (["-v", "--verbose"], {"help": "verbosity", "action": "count", "default": 0}),
        (
            ["--log-format"],
            {"help": "format for logs",
             "choices": ["plain", "rsm", "json", "lint"],
             "default": "rsm"},
        ),
        (
            ["--log-exclude-time"],
            {"dest": "log_time",
             "help": "exclude timestamp in logs",
             "action": "store_false"},
        ),
        (
            ["-V", "--version"],
            {"help": "rsm-markup version",
             "action": "version",
             "version": f'rsm-markup v{version("rsm-markup")}'},
        ),
    ]
    for names, options in argument_spec:
        parser.add_argument(*names, **options)
    return parser
def main(
    parser: ArgumentParser, func: Callable, args: Optional[Namespace] = None
) -> int:
    """Parse arguments (if not supplied), call ``func`` with them, print its output.

    Always returns 0 so entry points can use the result as an exit code.
    """
    if args is None:
        args = parser.parse_args()
    call_kwargs = {
        "handrails": args.handrails,
        # Each -v lowers the log level by 10 (i.e. more verbose).
        "loglevel": app.RSMApp.default_log_level - args.verbose * 10,
        "log_format": args.log_format,
        "log_time": args.log_time,
    }
    # With -c/--string, src is the literal source text; otherwise it is a path.
    call_kwargs["source" if args.string else "path"] = args.src
    output = func(**call_kwargs)
    if output and not args.suppress_output:
        print(output)
    return 0
def render() -> int:
    """Entry point: run app.render on the given source."""
    parser = init_parser()
    return main(parser, app.render)
def lint() -> int:
    """Entry point: run app.lint; stdout is suppressed since logs are the output."""
    parser = init_parser()
    parser.set_defaults(log_format="lint")
    parser.set_defaults(suppress_output=True)
    return main(parser, app.lint)
def make() -> int:
    """Entry point: run app.make, optionally serving with livereload (--serve)."""
    parser = init_parser()
    parser.add_argument("--serve", help="serve and autoreload", action="store_true")
    parser.set_defaults(handrails=True)
    args = parser.parse_args()
    if args.serve:
        # Re-run this same command line (minus --serve) whenever src changes.
        other_args = [a for a in sys.argv if a != "--serve"]
        cmd = " ".join(other_args)
        server = livereload.Server()
        server.watch(args.src, livereload.shell(cmd))
        server.serve(root=".")
    else:
        main(parser, app.make, args)
    return 0
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """

    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        # NOTE(review): raises ZeroDivisionError if self.data is empty;
        # self.data is assumed populated by the Distribution base class.
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample (divide by
                n - 1) or a population (divide by n)

        Returns:
            float: standard deviation of the data set
        """
        if sample:
            n = len(self.data) - 1
        else:
            n = len(self.data)
        mean = self.calculate_mean()
        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        sigma = math.sqrt(sigma / n)
        self.stdev = sigma
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces=50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range.

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        # calculate the x values and pdf values to visualize
        x = []
        y = []
        for i in range(n_spaces):
            tmp = min_range + interval*i
            x.append(tmp)
            y.append(self.pdf(tmp))

        # make the plots: histogram on top, fitted pdf below
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')
        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # BUG FIX: this line used to re-label axes[0]; the pdf axis is axes[1].
        axes[1].set_ylabel('Density')
        plt.show()
        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions.

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution whose mean is the sum of the means
            and whose variance is the sum of the variances
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance.

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
import os
import math
import sys
import tensorflow as tf
import random
import imghdr
# Seed for shuffling the training file list, so shard contents are reproducible.
_RANDOM_SEED = 0
# Number of TFRecord shards for the train split; other splits use 10 shards.
_TRAIN_NUM_SHARDS = 200
class ImageReader(object):
    """Decodes image bytes inside a TF1 graph/session (dataset-conversion helper)."""
    def __init__(self):
        # One string placeholder feeds all three decode ops.
        self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
        self._decode_png = tf.image.decode_png(self._decode_jpeg_data, channels = 3)
        self._decode_gif = tf.image.decode_gif(self._decode_jpeg_data)
    def read_image_dims(self, sess, image_data, image_type):
        """Return (height, width) of the encoded image bytes."""
        # JPEGs use decode_jpeg; every other type falls back to decode_png.
        if image_type == "jpeg" or image_type == "jpg":
            image = self.decode_jpeg(sess, image_data)
        else:
            image = self.decode_png(sess, image_data)
        return image.shape[0], image.shape[1]
    def is_jpeg(self, sess, image_data):
        # NOTE(review): creates a new is_jpeg op on every call, growing the graph.
        r = tf.image.is_jpeg(image_data)
        b = sess.run(r)
        return b
    def decode_png(self, sess, image_data):
        image = sess.run(self._decode_png, feed_dict={self._decode_jpeg_data: image_data})
        # Decoded images must be rank-3 with exactly 3 channels.
        assert len(image.shape) == 3
        assert image.shape[2] == 3
        return image
    def decode_jpeg(self, sess, image_data):
        image = sess.run(self._decode_jpeg, feed_dict={self._decode_jpeg_data: image_data})
        assert len(image.shape) == 3
        assert image.shape[2] == 3
        return image
def _get_dataset_filename(dataset_dir, split_name, shard_id, num_shards):
output_filename = 'nsfw_%s_%05d-of-%05d.tfrecord' % (
split_name, shard_id, num_shards)
return os.path.join(dataset_dir, output_filename)
def _convert_dataset(split_name, filenames, class_names_to_ids, dataset_dir):
    """Converts the given filenames to a TFRecord dataset.
    Args:
      split_name: The name of the dataset, either 'train' or 'validation'.
      filenames: A list of absolute paths to png or jpg images.
      class_names_to_ids: A dictionary from class names (strings) to ids
        (integers).
      dataset_dir: The directory where the converted datasets are stored.
    """
    assert split_name in ['train', 'validation', 'test']
    # The train split is spread over many shards; other splits use a fixed 10.
    if split_name == 'train':
        num_shards = _TRAIN_NUM_SHARDS
    else:
        num_shards = 10
    num_per_shard = int(math.ceil(len(filenames) / float(num_shards)))
    print(num_per_shard)
    # Count of images skipped because they could not be decoded.
    total_skip = 0
    with tf.Graph().as_default():
        image_reader = ImageReader()
        with tf.Session('') as sess:
            for shard_id in range(num_shards):
                output_filename = _get_dataset_filename(dataset_dir, split_name, shard_id, num_shards)
                with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
                    start_ndx = shard_id * num_per_shard
                    end_ndx = min((shard_id+1) * num_per_shard, len(filenames))
                    for i in range(start_ndx, end_ndx):
                        sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
                            i+1, len(filenames), shard_id))
                        sys.stdout.flush()
                        # Read the filename:
                        image_data = tf.gfile.FastGFile(filenames[i], 'rb').read()
                        # Sniff the real image type; default to jpg if unknown.
                        image_type = imghdr.what(filenames[i])
                        image_type = image_type if image_type else 'jpg'
                        try:
                            height, width = image_reader.read_image_dims(sess, image_data, image_type)
                        except Exception as e:
                            print(filenames[i])
                            print(e)
                            # BUG FIX: the skip counter was never incremented, so
                            # the summary print below always reported 0.
                            total_skip += 1
                            continue
                        # The class label is the name of the image's parent dir.
                        class_name = os.path.basename(os.path.dirname(filenames[i]))
                        class_id = class_names_to_ids[class_name]
                        example = image_to_tfexample(
                            image_data, b'jpg', height, width, class_id)
                        tfrecord_writer.write(example.SerializeToString())
    sys.stdout.write('\n')
    sys.stdout.flush()
    print(total_skip)
def image_to_tfexample(image_data, image_format, height, width, class_id):
    """Wrap one encoded image and its label into a tf.train.Example proto."""
    return tf.train.Example(features=tf.train.Features(feature={
        'image/encoded': bytes_feature(image_data),
        'image/format': bytes_feature(image_format),
        'image/class/label': int64_feature(class_id),
        'image/height': int64_feature(height),
        'image/width': int64_feature(width),
    }))
def int64_feature(values):
    """Return an int64_list Feature; accepts a single int or a list/tuple."""
    if not isinstance(values, (tuple, list)):
        values = [values]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def bytes_feature(values):
    """Return a bytes_list Feature wrapping a single bytes value."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))
def _get_filenames_and_classes(dataset_dir, mode):
flower_root = os.path.join(dataset_dir, mode)
directories = []
class_names = []
for filename in os.listdir(flower_root):
path = os.path.join(flower_root, filename)
if os.path.isdir(path):
directories.append(path)
class_names.append(filename)
photo_filenames = []
for directory in directories:
for filename in os.listdir(directory):
path = os.path.join(directory, filename)
photo_filenames.append(path)
return photo_filenames, sorted(class_names)
# NOTE(review): currently unused — the validation shards are built from the
# 'test' folder in main() instead of splitting the training data.
_NUM_VALIDATION = 4000
def main(dataset_dir):
    """Convert the 'train' and 'test' image folders under dataset_dir to TFRecords.

    Class ids are assigned from the train split's sorted class names and reused
    for the validation shards, so labels stay consistent across splits.
    """
    photo_filenames, class_names = _get_filenames_and_classes(dataset_dir, 'train')
    # Only the test split's file list is needed; its class names are ignored
    # because ids must come from the train split (cleaned up unused local).
    test_photo_filenames, _ = _get_filenames_and_classes(dataset_dir, 'test')
    class_names_to_ids = dict(zip(class_names, range(len(class_names))))
    # Seed before shuffling so shard contents are reproducible.
    random.seed(_RANDOM_SEED)
    random.shuffle(photo_filenames)
    _convert_dataset('train', photo_filenames, class_names_to_ids, dataset_dir)
    _convert_dataset('validation', test_photo_filenames, class_names_to_ids, dataset_dir)
if __name__ == "__main__":
"""
"""
dataset_dir = sys.argv[1]
main(dataset_dir) | /rsnsfw-0.0.6.tar.gz/rsnsfw-0.0.6/resnet/convert_image_to_tfrecod.py | 0.503174 | 0.37843 | convert_image_to_tfrecod.py | pypi |
"""Flags which will be nearly universal across models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow as tf
from utils.flags._conventions import help_wrap
from utils.logs import hooks_helper
def define_base(data_dir=True, model_dir=True, clean=True, train_epochs=True,
                epochs_between_evals=True, stop_threshold=True, batch_size=True,
                num_gpu=True, hooks=True, export_dir=True):
  """Register base flags.
  Args:
    data_dir: Create a flag for specifying the input data directory.
    model_dir: Create a flag for specifying the model file directory.
    clean: Create a flag for removing model_dir before training.
    train_epochs: Create a flag to specify the number of training epochs.
    epochs_between_evals: Create a flag to specify the frequency of testing.
    stop_threshold: Create a flag to specify a threshold accuracy or other
      eval metric which should trigger the end of training.
    batch_size: Create a flag to specify the batch size.
    num_gpu: Create a flag to specify the number of GPUs used.
    hooks: Create a flag to specify hooks for logging.
    export_dir: Create a flag to specify where a SavedModel should be exported.
  Returns:
    A list of flags for core.py to marks as key flags.
  """
  key_flags = []
  if data_dir:
    flags.DEFINE_string(
        name="data_dir", short_name="dd", default="/tmp",
        help=help_wrap("The location of the input data."))
    key_flags.append("data_dir")
  if model_dir:
    flags.DEFINE_string(
        name="model_dir", short_name="md", default="/tmp",
        help=help_wrap("The location of the model checkpoint files."))
    key_flags.append("model_dir")
  if clean:
    flags.DEFINE_boolean(
        name="clean", default=False,
        help=help_wrap("If set, model_dir will be removed if it exists."))
    key_flags.append("clean")
  if train_epochs:
    flags.DEFINE_integer(
        name="train_epochs", short_name="te", default=1,
        help=help_wrap("The number of epochs used to train."))
    key_flags.append("train_epochs")
  if epochs_between_evals:
    flags.DEFINE_integer(
        name="epochs_between_evals", short_name="ebe", default=1,
        help=help_wrap("The number of training epochs to run between "
                       "evaluations."))
    key_flags.append("epochs_between_evals")
  if stop_threshold:
    flags.DEFINE_float(
        name="stop_threshold", short_name="st",
        default=None,
        help=help_wrap("If passed, training will stop at the earlier of "
                       "train_epochs and when the evaluation metric is "
                       "greater than or equal to stop_threshold."))
    # Consistency fix: every other branch registers its flag as a key flag;
    # this one previously did not.
    key_flags.append("stop_threshold")
  if batch_size:
    flags.DEFINE_integer(
        name="batch_size", short_name="bs", default=32,
        help=help_wrap("Batch size for training and evaluation. When using "
                       "multiple gpus, this is the global batch size for "
                       "all devices. For example, if the batch size is 32 "
                       "and there are 4 GPUs, each GPU will get 8 examples on "
                       "each step."))
    key_flags.append("batch_size")
  if num_gpu:
    flags.DEFINE_integer(
        name="num_gpus", short_name="ng",
        default=1 if tf.test.is_gpu_available() else 0,
        help=help_wrap(
            "How many GPUs to use with the DistributionStrategies API. The "
            "default is 1 if TensorFlow can detect a GPU, and 0 otherwise."))
    # Consistency fix: register num_gpus as a key flag like the others.
    key_flags.append("num_gpus")
  if hooks:
    # Construct a pretty summary of hooks.
    hook_list_str = (
        u"\ufeff  Hook:\n" + u"\n".join([u"\ufeff    {}".format(key) for key
                                         in hooks_helper.HOOKS]))
    flags.DEFINE_list(
        name="hooks", short_name="hk", default="LoggingTensorHook",
        help=help_wrap(
            u"A list of (case insensitive) strings to specify the names of "
            u"training hooks.\n{}\n\ufeff  Example: `--hooks ProfilerHook,"
            u"ExamplesPerSecondHook`\n See official.utils.logs.hooks_helper "
            u"for details.".format(hook_list_str))
    )
    key_flags.append("hooks")
  if export_dir:
    flags.DEFINE_string(
        name="export_dir", short_name="ed", default=None,
        help=help_wrap("If set, a SavedModel serialization of the model will "
                       "be exported to this directory at the end of training. "
                       "See the README for more details and relevant links.")
    )
    key_flags.append("export_dir")
  return key_flags
def get_num_gpus(flags_obj):
  """Resolve --num_gpus, where -1 means "use every locally available GPU"."""
  requested = flags_obj.num_gpus
  if requested != -1:
    return requested
  # Imported lazily: only needed when we must enumerate local devices.
  from tensorflow.python.client import device_lib  # pylint: disable=g-import-not-at-top
  local_device_protos = device_lib.list_local_devices()
  return sum(1 for d in local_device_protos if d.device_type == "GPU")
"""Flags for managing compute devices. Currently only contains TPU flags."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow as tf
from utils.flags._conventions import help_wrap
def require_cloud_storage(flag_names):
  """Register a validator to check directory flags.
  Args:
    flag_names: An iterable of strings containing the names of flags to be
      checked.
  """
  msg = "TPU requires GCS path for {}".format(", ".join(flag_names))
  # The decorator registers _path_check with absl at definition time; the
  # validator then runs whenever --tpu or any of the listed flags is set.
  @flags.multi_flags_validator(["tpu"] + flag_names, message=msg)
  def _path_check(flag_values):  # pylint: disable=missing-docstring
    # No TPU requested: nothing to validate.
    if flag_values["tpu"] is None:
      return True
    valid_flags = True
    for key in flag_names:
      if not flag_values[key].startswith("gs://"):
        tf.logging.error("{} must be a GCS path.".format(key))
        valid_flags = False
    return valid_flags
def define_device(tpu=True):
  """Register device specific flags.
  Args:
    tpu: Create flags to specify TPU operation.
  Returns:
    A list of flags for core.py to marks as key flags.
  """
  key_flags = []
  if tpu:
    flags.DEFINE_string(
        name="tpu", default=None,
        help=help_wrap(
            "The Cloud TPU to use for training. This should be either the name "
            "used when creating the Cloud TPU, or a "
            "grpc://ip.address.of.tpu:8470 url. Passing `local` will use the"
            "CPU of the local instance instead. (Good for debugging.)"))
    key_flags.append("tpu")
    # The remaining TPU flags are auxiliary and not registered as key flags.
    flags.DEFINE_string(
        name="tpu_zone", default=None,
        help=help_wrap(
            "[Optional] GCE zone where the Cloud TPU is located in. If not "
            "specified, we will attempt to automatically detect the GCE "
            "project from metadata."))
    flags.DEFINE_string(
        name="tpu_gcp_project", default=None,
        help=help_wrap(
            "[Optional] Project name for the Cloud TPU-enabled project. If not "
            "specified, we will attempt to automatically detect the GCE "
            "project from metadata."))
    flags.DEFINE_integer(name="num_tpu_shards", default=8,
                         help=help_wrap("Number of shards (TPU chips)."))
  return key_flags
"""Register flags for optimizing performance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
from absl import flags # pylint: disable=g-bad-import-order
import tensorflow as tf # pylint: disable=g-bad-import-order
from utils.flags._conventions import help_wrap
# Map string to (TensorFlow dtype, default loss scale)
DTYPE_MAP = {
    "fp16": (tf.float16, 128),
    "fp32": (tf.float32, 1),
}
def get_tf_dtype(flags_obj):
  """Return the TensorFlow dtype selected by --dtype."""
  return DTYPE_MAP[flags_obj.dtype][0]
def get_loss_scale(flags_obj):
  """Return --loss_scale if given, else the selected dtype's default scale."""
  if flags_obj.loss_scale is not None:
    return flags_obj.loss_scale
  return DTYPE_MAP[flags_obj.dtype][1]
def define_performance(num_parallel_calls=True, inter_op=True, intra_op=True,
                       synthetic_data=True, max_train_steps=True, dtype=True,
                       all_reduce_alg=True):
  """Register flags for specifying performance tuning arguments.
  Args:
    num_parallel_calls: Create a flag to specify parallelism of data loading.
    inter_op: Create a flag to allow specification of inter op threads.
    intra_op: Create a flag to allow specification of intra op threads.
    synthetic_data: Create a flag to allow the use of synthetic data.
    max_train_steps: Create a flags to allow specification of maximum number
      of training steps
    dtype: Create flags for specifying dtype.
    all_reduce_alg: Create a flag to specify the all-reduce algorithm.
  Returns:
    A list of flags for core.py to marks as key flags.
  """
  key_flags = []
  if num_parallel_calls:
    flags.DEFINE_integer(
        name="num_parallel_calls", short_name="npc",
        default=multiprocessing.cpu_count(),
        help=help_wrap("The number of records that are  processed in parallel "
                       "during input processing. This can be optimized per "
                       "data set but for generally homogeneous data sets, "
                       "should be approximately the number of available CPU "
                       "cores. (default behavior)"))
  if inter_op:
    flags.DEFINE_integer(
        name="inter_op_parallelism_threads", short_name="inter", default=0,
        help=help_wrap("Number of inter_op_parallelism_threads to use for CPU. "
                       "See TensorFlow config.proto for details.")
    )
  if intra_op:
    flags.DEFINE_integer(
        name="intra_op_parallelism_threads", short_name="intra", default=0,
        help=help_wrap("Number of intra_op_parallelism_threads to use for CPU. "
                       "See TensorFlow config.proto for details."))
  if synthetic_data:
    flags.DEFINE_bool(
        name="use_synthetic_data", short_name="synth", default=False,
        help=help_wrap(
            "If set, use fake data (zeroes) instead of a real dataset. "
            "This mode is useful for performance debugging, as it removes "
            "input processing steps, but will not learn anything."))
  if max_train_steps:
    flags.DEFINE_integer(
        name="max_train_steps", short_name="mts", default=None, help=help_wrap(
            "The model will stop training if the global_step reaches this "
            "value. If not set, training will run until the specified number "
            "of epochs have run as usual. It is generally recommended to set "
            "--train_epochs=1 when using this flag."
        ))
  if dtype:
    flags.DEFINE_enum(
        name="dtype", short_name="dt", default="fp32",
        enum_values=DTYPE_MAP.keys(),
        help=help_wrap("The TensorFlow datatype used for calculations. "
                       "Variables may be cast to a higher precision on a "
                       "case-by-case basis for numerical stability."))
    flags.DEFINE_integer(
        name="loss_scale", short_name="ls", default=None,
        help=help_wrap(
            "The amount to scale the loss by when the model is run. Before "
            "gradients are computed, the loss is multiplied by the loss scale, "
            "making all gradients loss_scale times larger. To adjust for this, "
            "gradients are divided by the loss scale before being applied to "
            "variables. This is mathematically equivalent to training without "
            "a loss scale, but the loss scale helps avoid some intermediate "
            "gradients from underflowing to zero. If not provided the default "
            "for fp16 is 128 and 1 for all other dtypes."))
    loss_scale_val_msg = "loss_scale should be a positive integer."
    # Registered with absl at definition time; runs when --loss_scale is set.
    @flags.validator(flag_name="loss_scale", message=loss_scale_val_msg)
    def _check_loss_scale(loss_scale):  # pylint: disable=unused-variable
      if loss_scale is None:
        return True  # null case is handled in get_loss_scale()
      return loss_scale > 0
  if all_reduce_alg:
    flags.DEFINE_string(
        name="all_reduce_alg", short_name="ara", default=None,
        help=help_wrap("Defines the algorithm to use for performing all-reduce."
                       "See tf.contrib.distribute.AllReduceCrossTowerOps for "
                       "more details and available options."))
  return key_flags
"""Flags for benchmarking models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from utils.flags._conventions import help_wrap
def define_benchmark(benchmark_log_dir=True, bigquery_uploader=True):
  """Register benchmarking flags.
  Args:
    benchmark_log_dir: Create a flag to specify location for benchmark logging.
    bigquery_uploader: Create flags for uploading results to BigQuery.
  Returns:
    A list of flags for core.py to marks as key flags.
  """
  key_flags = []
  # These two flags are defined unconditionally.
  flags.DEFINE_enum(
      name="benchmark_logger_type", default="BaseBenchmarkLogger",
      enum_values=["BaseBenchmarkLogger", "BenchmarkFileLogger",
                   "BenchmarkBigQueryLogger"],
      help=help_wrap("The type of benchmark logger to use. Defaults to using "
                     "BaseBenchmarkLogger which logs to STDOUT. Different "
                     "loggers will require other flags to be able to work."))
  flags.DEFINE_string(
      name="benchmark_test_id", short_name="bti", default=None,
      help=help_wrap("The unique test ID of the benchmark run. It could be the "
                     "combination of key parameters. It is hardware "
                     "independent and could be used compare the performance "
                     "between different test runs. This flag is designed for "
                     "human consumption, and does not have any impact within "
                     "the system."))
  if benchmark_log_dir:
    flags.DEFINE_string(
        name="benchmark_log_dir", short_name="bld", default=None,
        help=help_wrap("The location of the benchmark logging.")
    )
  if bigquery_uploader:
    flags.DEFINE_string(
        name="gcp_project", short_name="gp", default=None,
        help=help_wrap(
            "The GCP project name where the benchmark will be uploaded."))
    flags.DEFINE_string(
        name="bigquery_data_set", short_name="bds", default="test_benchmark",
        help=help_wrap(
            "The Bigquery dataset name where the benchmark will be uploaded."))
    flags.DEFINE_string(
        name="bigquery_run_table", short_name="brt", default="benchmark_run",
        help=help_wrap("The Bigquery table name where the benchmark run "
                       "information will be uploaded."))
    flags.DEFINE_string(
        name="bigquery_run_status_table", short_name="brst",
        default="benchmark_run_status",
        help=help_wrap("The Bigquery table name where the benchmark run "
                       "status information will be uploaded."))
    flags.DEFINE_string(
        name="bigquery_metric_table", short_name="bmt",
        default="benchmark_metric",
        help=help_wrap("The Bigquery table name where the benchmark metric "
                       "information will be uploaded."))
  # Cross-flag validator: the file logger is only usable with a log dir.
  @flags.multi_flags_validator(
      ["benchmark_logger_type", "benchmark_log_dir"],
      message="--benchmark_logger_type=BenchmarkFileLogger will require "
              "--benchmark_log_dir being set")
  def _check_benchmark_log_dir(flags_dict):
    benchmark_logger_type = flags_dict["benchmark_logger_type"]
    if benchmark_logger_type == "BenchmarkFileLogger":
      return flags_dict["benchmark_log_dir"]
    return True
  return key_flags
"""Functions specific to running TensorFlow on TPUs."""
import tensorflow as tf
# "local" is a magic word in the TPU cluster resolver; it informs the resolver
# to use the local CPU as the compute device. This is useful for testing and
# debugging; the code flow is ostensibly identical, but without the need to
# actually have a TPU on the other end.
LOCAL = "local"
def construct_scalar_host_call(metric_dict, model_dir, prefix=""):
  """Build a TPU host_call that writes scalar summaries for training metrics.

  Args:
    metric_dict: Mapping from metric name to the scalar tensor to be logged.
    model_dir: Directory where the summary writer stores its event files.
    prefix: Optional string prepended to every metric name.

  Returns:
    A `(fn, tensors)` pair suitable for `TPUEstimatorSpec(host_call=...)`:
    `fn` runs on the host CPU and `tensors` is the list of `[1]`-shaped
    tensors passed to it (global step first, then one tensor per metric).
  """
  names = list(metric_dict.keys())

  def host_call_fn(global_step, *metric_tensors):
    """Writes one scalar summary per metric; executed on the host CPU.

    This function must not reference tensors from the rest of the model_fn
    directly; everything it needs arrives through its arguments, which line
    up positionally with the tensor list returned below. See
    https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
    for the host_call contract.

    Args:
      global_step: `Tensor` with shape `[batch]` holding the global step.
      *metric_tensors: One `[batch]`-shaped tensor per entry in `names`.

    Returns:
      List of summary ops to run on the CPU host.
    """
    step = global_step[0]
    writer = tf.contrib.summary.create_file_writer(
        logdir=model_dir, filename_suffix=".host_call")
    with writer.as_default(), tf.contrib.summary.always_record_summaries():
      for name, tensor in zip(names, metric_tensors):
        tf.contrib.summary.scalar(prefix + name, tensor[0], step=step)
    return tf.contrib.summary.all_summary_ops()

  # host_call only accepts [batch_size, ...] tensors, so every scalar is
  # reshaped to [1]; the TPU infrastructure implicitly concatenates them to
  # [params['batch_size']] across replicas.
  step_tensor = tf.reshape(tf.train.get_or_create_global_step(), [1])
  metric_tensors = [tf.reshape(metric_dict[name], [1]) for name in names]
  return host_call_fn, [step_tensor] + metric_tensors
def embedding_matmul(embedding_table, values, mask, name="embedding_matmul"):
  """Performs an embedding lookup expressed as a dense matmul (TPU friendly).

  A (batch x n_indices x table_size) selection tensor is built by broadcasting
  the indices against range(table_size) and comparing for equality, weighted
  by `mask`. Contracting it with the table yields the embeddings; masked
  positions produce all-zero vectors.

  Args:
    embedding_table: Rank-2 tensor of the embedding table
      (table_size x embedding_dim).
    values: Rank-2 tensor of embedding indices (batch x n_indices).
    mask: Rank-2 tensor of mask / weights (batch x n_indices).
    name: Optional name scope for the created ops.

  Returns:
    Rank-3 tensor of embedding vectors (batch x n_indices x embedding_dim).
  """
  with tf.name_scope(name):
    table_size = embedding_table.get_shape().as_list()[0]
    batch, n_indices = values.shape.as_list()

    # Broadcast both the indices and their weights along a new table axis.
    def _tile_along_table(t):
      return tf.tile(tf.reshape(t, (batch, n_indices, 1)),
                     (1, 1, table_size))

    idx = _tile_along_table(values)
    weights = _tile_along_table(mask)
    columns = tf.tile(
        tf.reshape(tf.range(table_size), (1, 1, table_size)),
        (batch, n_indices, 1))
    # Scatter-as-compare: a weighted one-hot row per (batch, index) pair.
    selector = tf.where(
        tf.equal(idx, columns), weights,
        tf.zeros((batch, n_indices, table_size)))
    return tf.tensordot(selector, embedding_table, 1)
"""Convenience functions for managing dataset file buffers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import multiprocessing
import os
import tempfile
import uuid
import numpy as np
import six
import tensorflow as tf
class _GarbageCollector(object):
"""Deletes temporary buffer files at exit.
Certain tasks (such as NCF Recommendation) require writing buffers to
temporary files. (Which may be local or distributed.) It is not generally safe
to delete these files during operation, but they should be cleaned up. This
class keeps track of temporary files created, and deletes them at exit.
"""
def __init__(self):
self.temp_buffers = []
def register(self, filepath):
self.temp_buffers.append(filepath)
def purge(self):
try:
for i in self.temp_buffers:
if tf.gfile.Exists(i):
tf.gfile.Remove(i)
tf.logging.info("Buffer file {} removed".format(i))
except Exception as e:
tf.logging.error("Failed to cleanup buffer files: {}".format(e))
# Module-level collector instance; registered with atexit so that temporary
# buffer files are cleaned up when the interpreter exits.
_GARBAGE_COLLECTOR = _GarbageCollector()
atexit.register(_GARBAGE_COLLECTOR.purge)
# Number of dataframe rows handed to each worker process per serialization
# batch in iter_shard_dataframe/write_to_buffer.
_ROWS_PER_CORE = 50000
def write_to_temp_buffer(dataframe, buffer_folder, columns):
  """Serializes `dataframe` to a buffer file that is deleted at exit.

  Args:
    dataframe: Pandas dataframe to serialize.
    buffer_folder: Directory for the buffer file. When None, a system temp
      file is used instead.
    columns: The dataframe columns to be serialized.

  Returns:
    The path of the written buffer file.
  """
  if buffer_folder is None:
    _, path = tempfile.mkstemp()
  else:
    tf.gfile.MakeDirs(buffer_folder)
    path = os.path.join(buffer_folder, str(uuid.uuid4()))
  # Make sure the buffer does not outlive the process.
  _GARBAGE_COLLECTOR.register(path)
  return write_to_buffer(dataframe, path, columns)
def iter_shard_dataframe(df, rows_per_core=1000):
  """Two-way shard of a dataframe.

  Evenly shards a dataframe so it can be mapped efficiently: each yielded
  item is a list with one dataframe per CPU core, each holding about
  `rows_per_core` rows (the final batch may be smaller). Passing such
  vectorized batches to a multiprocessing pool is much more efficient than
  iterating a dataframe serially and mapping individual rows.

  Args:
    df: Pandas dataframe to be sharded.
    rows_per_core: Number of rows in each per-core shard.

  Yields:
    Lists of dataframe slices, one list per batch.
  """
  total_rows = len(df)
  cores = min([multiprocessing.cpu_count(), total_rows])
  batch_rows = cores * rows_per_core
  num_batches = int(np.ceil(total_rows / cores / rows_per_core))
  for block in range(num_batches):
    start = block * batch_rows
    stop = min([(block + 1) * batch_rows, total_rows])
    batch = df[start:stop]
    # Cut the batch into `cores` nearly equal contiguous slices.
    cuts = np.linspace(0, len(batch), cores + 1, dtype=np.int64)
    yield [batch[cuts[j]:cuts[j + 1]] for j in range(cores)]
def _shard_dict_to_examples(shard_dict):
  """Converts a dict of column arrays into serialized tf.train.Examples.

  Args:
    shard_dict: Mapping from column name to a numpy array whose first
      dimension is the row count (all columns must agree on it).

  Returns:
    A list of serialized `tf.train.Example` byte strings, one per row.

  Raises:
    ValueError: If a column is neither integer nor float typed.
  """
  num_rows = list(shard_dict.values())[0].shape[0]
  row_features = [{} for _ in range(num_rows)]
  for column, values in shard_dict.items():
    # Features are rank-1 lists; promote flat columns to 1-element vectors.
    if len(values.shape) == 1:
      values = np.reshape(values, values.shape + (1,))

    if values.dtype.kind == "i":
      to_feature = lambda row: tf.train.Feature(
          int64_list=tf.train.Int64List(value=row))
    elif values.dtype.kind == "f":
      to_feature = lambda row: tf.train.Feature(
          float_list=tf.train.FloatList(value=row))
    else:
      raise ValueError("Invalid dtype")

    for row in range(num_rows):
      row_features[row][column] = to_feature(values[row])

  return [
      tf.train.Example(
          features=tf.train.Features(feature=features)).SerializeToString()
      for features in row_features
  ]
def _serialize_shards(df_shards, columns, pool, writer):
  """Serializes dataframe shards in parallel and writes them to `writer`.

  Args:
    df_shards: A list of pandas dataframes. (Should be of similar size.)
    columns: The dataframe columns to be serialized.
    pool: A multiprocessing pool used for parallel serialization.
    writer: A TFRecordWriter receiving the serialized examples.
  """
  # Pandas stores array-valued columns as object arrays; stacking yields the
  # contiguous ndarray each worker expects.
  map_inputs = [{c: np.stack(shard[c].values, axis=0) for c in columns}
                for shard in df_shards]

  # Failures inside pool workers are very irksome to debug, so thoroughly
  # validate the inputs up front in the parent process.
  for shard_arrays in map_inputs:
    # Every column in a shard must have the same number of rows.
    row_counts = {v.shape[0] for v in shard_arrays.values()}
    assert len(row_counts) == 1
    for arr in shard_arrays.values():
      assert hasattr(arr, "dtype")
      assert hasattr(arr.dtype, "kind")
      assert arr.dtype.kind in ("i", "f")
      assert len(arr.shape) in (1, 2)

  for serialized_shard in pool.map(_shard_dict_to_examples, map_inputs):
    for example in serialized_shard:
      writer.write(example)
def write_to_buffer(dataframe, buffer_path, columns, expected_size=None):
  """Writes a dataframe to a TFRecord buffer, reusing a valid existing one.

  Args:
    dataframe: The pandas dataframe to be serialized. May be None only when a
      valid buffer of `expected_size` bytes already exists at `buffer_path`.
    buffer_path: The path where the serialized results will be written.
    columns: The dataframe columns to be serialized.
    expected_size: The size in bytes of the serialized results; when the
      existing buffer matches it, the buffer is reused instead of rebuilt.

  Returns:
    The path of the buffer.

  Raises:
    ValueError: If no valid buffer exists and `dataframe` is None.
  """
  if tf.gfile.Exists(buffer_path) and tf.gfile.Stat(buffer_path).length > 0:
    actual_size = tf.gfile.Stat(buffer_path).length
    if expected_size == actual_size:
      # Existing buffer is intact; skip the expensive rebuild.
      return buffer_path
    tf.logging.warning(
        "Existing buffer {} has size {}. Expected size {}. Deleting and "
        "rebuilding buffer.".format(buffer_path, actual_size, expected_size))
    tf.gfile.Remove(buffer_path)

  if dataframe is None:
    raise ValueError(
        "dataframe was None but a valid existing buffer was not found.")

  tf.gfile.MakeDirs(os.path.split(buffer_path)[0])

  tf.logging.info("Constructing TFRecordDataset buffer: {}".format(buffer_path))

  rows_written = 0
  pool = multiprocessing.Pool(multiprocessing.cpu_count())
  try:
    with tf.python_io.TFRecordWriter(buffer_path) as writer:
      for df_shards in iter_shard_dataframe(df=dataframe,
                                            rows_per_core=_ROWS_PER_CORE):
        _serialize_shards(df_shards, columns, pool, writer)
        rows_written += sum(len(s) for s in df_shards)
        tf.logging.info("{}/{} examples written."
                        .format(str(rows_written).ljust(8), len(dataframe)))
  finally:
    # Always tear the pool down, even when serialization fails mid-way.
    pool.terminate()

  tf.logging.info("Buffer write complete.")
  return buffer_path
"""Session hook for logging benchmark metric."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-bad-import-order
class LoggingMetricHook(tf.train.LoggingTensorHook):
  """Hook to log benchmark metric information.

  This hook is very similar to tf.train.LoggingTensorHook, which logs given
  tensors every N local steps, every N seconds, or at the end. The metric
  information will be logged via the metric_logger in JSON format, which can
  be consumed by a data analysis pipeline later.

  Note that if `at_end` is True, `tensors` should not include any tensor
  whose evaluation produces a side effect such as consuming additional inputs.
  """

  def __init__(self, tensors, metric_logger=None,
               every_n_iter=None, every_n_secs=None, at_end=False):
    """Initializer for LoggingMetricHook.

    Args:
      tensors: `dict` that maps string-valued tags to tensors/tensor names,
          or `iterable` of tensors/tensor names.
      metric_logger: instance of `BenchmarkLogger`, the benchmark logger that
          hook should use to write the log.
      every_n_iter: `int`, print the values of `tensors` once every N local
          steps taken on the current worker.
      every_n_secs: `int` or `float`, print the values of `tensors` once every N
          seconds. Exactly one of `every_n_iter` and `every_n_secs` should be
          provided.
      at_end: `bool` specifying whether to print the values of `tensors` at the
          end of the run.

    Raises:
      ValueError:
        1. `every_n_iter` is non-positive, or
        2. Exactly one of every_n_iter and every_n_secs should be provided.
        3. `metric_logger` is not provided.
    """
    super(LoggingMetricHook, self).__init__(
        tensors=tensors,
        every_n_iter=every_n_iter,
        every_n_secs=every_n_secs,
        at_end=at_end)

    if metric_logger is None:
      raise ValueError("metric_logger should be provided.")
    self._logger = metric_logger

  def begin(self):
    """Extends the parent hook so the global step is always fetched too."""
    super(LoggingMetricHook, self).begin()
    self._global_step_tensor = tf.train.get_global_step()
    if self._global_step_tensor is None:
      raise RuntimeError(
          "Global step should be created to use LoggingMetricHook.")
    # Fetch the global step together with the logged tensors so that
    # _log_metric can attribute every value to a step.
    if self._global_step_tensor.name not in self._current_tensors:
      self._current_tensors[self._global_step_tensor.name] = (
          self._global_step_tensor)

  def after_run(self, unused_run_context, run_values):
    """Logs the fetched tensor values whenever the trigger fired."""
    # should_trigger is a internal state that populated at before_run, and it is
    # using self_timer to determine whether it should trigger.
    if self._should_trigger:
      self._log_metric(run_values.results)

    self._iter_count += 1

  def end(self, session):
    """Logs one final set of values when `at_end` was requested."""
    if self._log_at_end:
      values = session.run(self._current_tensors)
      self._log_metric(values)

  def _log_metric(self, tensor_values):
    """Writes every logged tensor value through the benchmark logger."""
    self._timer.update_last_triggered_step(self._iter_count)
    global_step = tensor_values[self._global_step_tensor.name]
    # self._tag_order is populated during the init of LoggingTensorHook
    for tag in self._tag_order:
      self._logger.log_metric(tag, tensor_values[tag], global_step=global_step)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-bad-import-order
from utils.logs import hooks
from utils.logs import logger
from utils.logs import metric_hook
_TENSORS_TO_LOG = dict((x, x) for x in ['learning_rate',
'cross_entropy',
'train_accuracy'])
def get_train_hooks(name_list, use_tpu=False, **kwargs):
  """Factory for getting a list of TensorFlow hooks for training by name.

  Args:
    name_list: a list of strings to name desired hook classes. Allowed:
      LoggingTensorHook, ProfilerHook, ExamplesPerSecondHook, which are defined
      as keys in HOOKS.
    use_tpu: Boolean of whether computation occurs on a TPU. This will disable
      hooks altogether.
    **kwargs: a dictionary of arguments to the hooks.

  Returns:
    list of instantiated hooks, ready to be used in a classifier.train call.

  Raises:
    ValueError: if an unrecognized name is passed.
  """
  if not name_list:
    return []

  if use_tpu:
    # Session hooks run on the coordinator and are incompatible with TPU
    # training loops, so they are disabled wholesale.
    tf.logging.warning("hooks_helper received name_list `{}`, but a TPU is "
                       "specified. No hooks will be used.".format(name_list))
    return []

  train_hooks = []
  for name in name_list:
    # Hook names are matched case-insensitively against the HOOKS registry.
    factory = HOOKS.get(name.strip().lower())
    if factory is None:
      raise ValueError('Unrecognized training hook requested: {}'.format(name))
    train_hooks.append(factory(**kwargs))

  return train_hooks
def get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs):  # pylint: disable=unused-argument
  """Function to get LoggingTensorHook.

  Args:
    every_n_iter: `int`, print the values of `tensors` once every N local
      steps taken on the current worker.
    tensors_to_log: List of tensor names or dictionary mapping labels to tensor
      names. If not set, log _TENSORS_TO_LOG by default.
    **kwargs: a dictionary of arguments to LoggingTensorHook (unused).

  Returns:
    A LoggingTensorHook that prints a standard set of tensors to stdout.
  """
  tensors = _TENSORS_TO_LOG if tensors_to_log is None else tensors_to_log
  return tf.train.LoggingTensorHook(
      tensors=tensors,
      every_n_iter=every_n_iter)
def get_profiler_hook(model_dir, save_steps=1000, **kwargs):  # pylint: disable=unused-argument
  """Function to get ProfilerHook.

  Args:
    model_dir: The directory to save the profile traces to.
    save_steps: `int`, save profile traces every N steps.
    **kwargs: a dictionary of arguments to ProfilerHook (unused).

  Returns:
    A ProfilerHook that writes out timelines that can be loaded into
    profiling tools like chrome://tracing.
  """
  return tf.train.ProfilerHook(output_dir=model_dir, save_steps=save_steps)
def get_examples_per_second_hook(every_n_steps=100,
                                 batch_size=128,
                                 warm_steps=5,
                                 **kwargs):  # pylint: disable=unused-argument
  """Function to get ExamplesPerSecondHook.

  Args:
    every_n_steps: `int`, print current and average examples per second every
      N steps.
    batch_size: `int`, total batch size used to calculate examples/second from
      global time.
    warm_steps: skip this number of steps before logging and running average.
    **kwargs: a dictionary of arguments to ExamplesPerSecondHook (unused).

  Returns:
    An ExamplesPerSecondHook that reports throughput through the benchmark
    logger.
  """
  return hooks.ExamplesPerSecondHook(
      batch_size=batch_size,
      every_n_steps=every_n_steps,
      warm_steps=warm_steps,
      metric_logger=logger.get_benchmark_logger())
def get_logging_metric_hook(tensors_to_log=None,
                            every_n_secs=600,
                            **kwargs):  # pylint: disable=unused-argument
  """Function to get LoggingMetricHook.

  Args:
    tensors_to_log: List of tensor names or dictionary mapping labels to tensor
      names. If not set, log _TENSORS_TO_LOG by default.
    every_n_secs: `int`, the frequency for logging the metric. Default to every
      10 mins.
    **kwargs: accepted for uniformity with the other hook factories (unused).

  Returns:
    Returns a LoggingMetricHook that saves tensor values in a JSON format.
  """
  tensors = _TENSORS_TO_LOG if tensors_to_log is None else tensors_to_log
  return metric_hook.LoggingMetricHook(
      tensors=tensors,
      metric_logger=logger.get_benchmark_logger(),
      every_n_secs=every_n_secs)
# A dictionary to map one hook name and its corresponding function.
# Keys are the lower-cased names accepted by get_train_hooks; values are the
# factory functions defined above, each called with the same **kwargs.
HOOKS = {
    'loggingtensorhook': get_logging_tensor_hook,
    'profilerhook': get_profiler_hook,
    'examplespersecondhook': get_examples_per_second_hook,
    'loggingmetrichook': get_logging_metric_hook,
}
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-bad-import-order
from utils.logs import logger
class ExamplesPerSecondHook(tf.train.SessionRunHook):
  """Hook to print out examples per second.

  Total time is tracked and then divided by the total number of steps
  to get the average step time and then batch_size is used to determine
  the running average of examples per second. The examples per second for the
  most recent interval is also logged.
  """

  def __init__(self,
               batch_size,
               every_n_steps=None,
               every_n_secs=None,
               warm_steps=0,
               metric_logger=None):
    """Initializer for ExamplesPerSecondHook.

    Args:
      batch_size: Total batch size across all workers used to calculate
        examples/second from global time.
      every_n_steps: Log stats every n steps.
      every_n_secs: Log stats every n seconds. Exactly one of the
        `every_n_steps` or `every_n_secs` should be set.
      warm_steps: The number of steps to be skipped before logging and running
        average calculation. warm_steps steps refers to global steps across all
        workers, not on each worker
      metric_logger: instance of `BenchmarkLogger`, the benchmark logger that
        hook should use to write the log. If None, BaseBenchmarkLogger will
        be used.

    Raises:
      ValueError: if neither `every_n_steps` or `every_n_secs` is set, or
      both are set.
    """

    if (every_n_steps is None) == (every_n_secs is None):
      raise ValueError("exactly one of every_n_steps"
                       " and every_n_secs should be provided.")

    self._logger = metric_logger or logger.BaseBenchmarkLogger()

    # The timer decides when a new measurement interval starts (by steps or
    # by wall time, whichever was configured).
    self._timer = tf.train.SecondOrStepTimer(
        every_steps=every_n_steps, every_secs=every_n_secs)

    # Cumulative training time (seconds) and step count accumulated across
    # all triggered intervals; used for the running average.
    self._step_train_time = 0
    self._total_steps = 0
    self._batch_size = batch_size
    self._warm_steps = warm_steps

  def begin(self):
    """Called once before using the session to check global step."""
    self._global_step_tensor = tf.train.get_global_step()
    if self._global_step_tensor is None:
      raise RuntimeError(
          "Global step should be created to use StepCounterHook.")

  def before_run(self, run_context):  # pylint: disable=unused-argument
    """Called before each call to run().

    Args:
      run_context: A SessionRunContext object.

    Returns:
      A SessionRunArgs object or None if never triggered.
    """
    # Fetch the global step alongside the training ops.
    return tf.train.SessionRunArgs(self._global_step_tensor)

  def after_run(self, run_context, run_values):  # pylint: disable=unused-argument
    """Called after each call to run().

    Args:
      run_context: A SessionRunContext object.
      run_values: A SessionRunValues object.
    """
    global_step = run_values.results

    # Only log after the warm-up period, and only when the timer interval
    # has elapsed. update_last_triggered_step returns None for elapsed_time
    # on the very first trigger, in which case nothing is logged yet.
    if self._timer.should_trigger_for_step(
        global_step) and global_step > self._warm_steps:
      elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(
          global_step)
      if elapsed_time is not None:
        self._step_train_time += elapsed_time
        self._total_steps += elapsed_steps

        # average examples per second is based on the total (accumulative)
        # training steps and training time so far
        average_examples_per_sec = self._batch_size * (
            self._total_steps / self._step_train_time)
        # current examples per second is based on the elapsed training steps
        # and training time per batch
        current_examples_per_sec = self._batch_size * (
            elapsed_steps / elapsed_time)

        self._logger.log_metric(
            "average_examples_per_sec", average_examples_per_sec,
            global_step=global_step)

        self._logger.log_metric(
            "current_examples_per_sec", current_examples_per_sec,
            global_step=global_step)
"""Helper functions for running models in a distributed setting."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def get_distribution_strategy(num_gpus, all_reduce_alg=None):
  """Return a DistributionStrategy for running the model.

  Args:
    num_gpus: Number of GPUs to run this model.
    all_reduce_alg: Specify which algorithm to use when performing all-reduce.
      See tf.contrib.distribute.AllReduceCrossTowerOps for available
      algorithms. If None, DistributionStrategy will choose based on device
      topology.

  Returns:
    tf.contrib.distribute.DistibutionStrategy object.
  """
  if num_gpus == 0:
    return tf.contrib.distribute.OneDeviceStrategy("device:CPU:0")
  if num_gpus == 1:
    return tf.contrib.distribute.OneDeviceStrategy("device:GPU:0")

  # Multi-GPU: mirror variables across the devices.
  if all_reduce_alg is None:
    return tf.contrib.distribute.MirroredStrategy(num_gpus=num_gpus)
  cross_tower_ops = tf.contrib.distribute.AllReduceCrossTowerOps(
      all_reduce_alg, num_packs=num_gpus)
  return tf.contrib.distribute.MirroredStrategy(
      num_gpus=num_gpus, cross_tower_ops=cross_tower_ops)
def per_device_batch_size(batch_size, num_gpus):
  """For multi-gpu, batch-size must be a multiple of the number of GPUs.

  Note that this should eventually be handled by DistributionStrategies
  directly. Multi-GPU support is currently experimental, however,
  so doing the work here until that feature is in place.

  Args:
    batch_size: Global batch size to be divided among devices. This should be
      equal to num_gpus times the single-GPU batch_size for multi-gpu training.
    num_gpus: How many GPUs are used with DistributionStrategies.

  Returns:
    Batch size per device.

  Raises:
    ValueError: if batch_size is not divisible by number of devices
  """
  if num_gpus <= 1:
    return batch_size

  remainder = batch_size % num_gpus
  if remainder:
    err = ("When running with multiple GPUs, batch size "
           "must be a multiple of the number of available GPUs. Found {} "
           "GPUs with a batch size of {}; try --batch_size={} instead."
          ).format(num_gpus, batch_size, batch_size - remainder)
    raise ValueError(err)
  # Use integer floor division: the previous `int(batch_size / num_gpus)`
  # rounded through a float and could lose precision for very large sizes.
  return batch_size // num_gpus
"""Miscellaneous functions that can be called by models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import tensorflow as tf
from tensorflow.python.util import nest
def past_stop_threshold(stop_threshold, eval_metric):
  """Return a boolean representing whether a model should be stopped.

  Args:
    stop_threshold: float, the threshold above which a model should stop
      training.
    eval_metric: float, the current value of the relevant metric to check.

  Returns:
    True if training should stop, False otherwise.

  Raises:
    ValueError: if either stop_threshold or eval_metric is not a number
  """
  if stop_threshold is None:
    return False

  if not isinstance(stop_threshold, numbers.Number):
    raise ValueError("Threshold for checking stop conditions must be a number.")
  if not isinstance(eval_metric, numbers.Number):
    raise ValueError("Eval metric being checked against stop conditions "
                     "must be a number.")

  if eval_metric < stop_threshold:
    return False

  tf.logging.info(
      "Stop threshold of {} was passed with metric value {}.".format(
          stop_threshold, eval_metric))
  return True
def generate_synthetic_data(
    input_shape, input_value=0, input_dtype=None, label_shape=None,
    label_value=0, label_dtype=None):
  """Create a repeating dataset with constant values.

  Args:
    input_shape: a tf.TensorShape object or nested tf.TensorShapes. The shape
      of the input data.
    input_value: Value of each input element.
    input_dtype: Input dtype. If None, will be inferred by the input value.
    label_shape: a tf.TensorShape object or nested tf.TensorShapes. The shape
      of the label data.
    label_value: Value of each label element.
    label_dtype: Label dtype. If None, will be inferred by the label value.

  Returns:
    Dataset of tensors or tuples of tensors (if label_shape is set).
  """
  # TODO(kathywu): Replace with SyntheticDataset once it is in contrib.
  def _constants_like(value, dtype, shapes):
    """Builds a constant of `value` for every (possibly nested) shape."""
    return nest.map_structure(lambda s: tf.constant(value, dtype, s), shapes)

  element = _constants_like(input_value, input_dtype, input_shape)
  if label_shape:
    element = (element,
               _constants_like(label_value, label_dtype, label_shape))
  return tf.data.Dataset.from_tensors(element).repeat()
def apply_clean(flags_obj):
  """Deletes the model directory when --clean is set and the directory exists.

  Args:
    flags_obj: Parsed flags object providing `clean` and `model_dir`.
  """
  if not (flags_obj.clean and tf.gfile.Exists(flags_obj.model_dir)):
    return
  tf.logging.info("--clean flag set. Removing existing model dir: {}".format(
      flags_obj.model_dir))
  tf.gfile.DeleteRecursively(flags_obj.model_dir)
from gym.envs.registration import register

# (env id, entry point, extra constructor kwargs or None, episode step limit)
# for every environment shipped with rSoccer.
_ENV_SPECS = (
    ('VSS-v0', 'rsoccer_gym.vss.env_vss:VSSEnv', None, 1200),
    ('VSSMA-v0', 'rsoccer_gym.vss.env_ma:VSSMAEnv', None, 1200),
    ('VSSMAOpp-v0', 'rsoccer_gym.vss.env_ma:VSSMAOpp', None, 1200),
    ('VSSGk-v0', 'rsoccer_gym.vss.env_gk:rSimVSSGK', None, 1200),
    ('VSSFIRA-v0', 'rsoccer_gym.vss.env_vss:VSSFIRAEnv', None, 1200),
    ('SSLGoToBall-v0', 'rsoccer_gym.ssl.ssl_go_to_ball:SSLGoToBallEnv',
     {'field_type': 2, 'n_robots_yellow': 6}, 1200),
    ('SSLGoToBallIR-v0', 'rsoccer_gym.ssl.ssl_go_to_ball:SSLGoToBallIREnv',
     {'field_type': 2, 'n_robots_yellow': 6}, 1200),
    ('SSLGoToBallShoot-v0',
     'rsoccer_gym.ssl.ssl_go_to_ball_shoot:SSLGoToBallShootEnv',
     {'field_type': 2, 'random_init': True, 'enter_goal_area': False}, 2400),
    ('SSLStaticDefenders-v0',
     'rsoccer_gym.ssl.ssl_hw_challenge.static_defenders:SSLHWStaticDefendersEnv',
     {'field_type': 2}, 1000),
    ('SSLDribbling-v0',
     'rsoccer_gym.ssl.ssl_hw_challenge.dribbling:SSLHWDribblingEnv',
     None, 4800),
    ('SSLContestedPossession-v0',
     'rsoccer_gym.ssl.ssl_hw_challenge.contested_possession:SSLContestedPossessionEnv',
     None, 1200),
    ('SSLPassEndurance-v0',
     'rsoccer_gym.ssl.ssl_hw_challenge:SSLPassEnduranceEnv',
     None, 120),
    ('SSLPassEnduranceMA-v0',
     'rsoccer_gym.ssl.ssl_hw_challenge:SSLPassEnduranceMAEnv',
     None, 1200),
)

# Register every environment with gym; kwargs is only passed when present so
# the calls stay identical to explicit per-environment registration.
for _env_id, _entry_point, _kwargs, _max_steps in _ENV_SPECS:
    if _kwargs is None:
        register(id=_env_id, entry_point=_entry_point,
                 max_episode_steps=_max_steps)
    else:
        register(id=_env_id, entry_point=_entry_point, kwargs=_kwargs,
                 max_episode_steps=_max_steps)
import time
from typing import Dict, List, Optional
import gym
import numpy as np
from rsoccer_gym.Entities import Frame, Robot
from rsoccer_gym.Simulators.rsim import RSimVSS
from rsoccer_gym.Simulators.fira import Fira
class VSSBaseEnv(gym.Env):
    """Base gym environment for VSS (IEEE Very Small Size Soccer) matches.

    Wraps an RSimVSS simulator instance, exposing the standard gym `step` /
    `reset` / `render` / `close` API. Subclasses implement `_get_commands`,
    `_frame_to_observations`, `_calculate_reward_and_done` and
    `_get_initial_positions_frame` to define the concrete task.
    """
    metadata = {
        'render.modes': ['human', 'rgb_array'],
    }
    # Normalized observations are clipped to [-NORM_BOUNDS, NORM_BOUNDS].
    NORM_BOUNDS = 1.2

    def __init__(self, field_type: int,
                 n_robots_blue: int, n_robots_yellow: int, time_step: float):
        # Initialize Simulator
        self.time_step = time_step
        self.rsim = RSimVSS(field_type=field_type,
                            n_robots_blue=n_robots_blue,
                            n_robots_yellow=n_robots_yellow,
                            time_step_ms=int(self.time_step*1000))
        self.n_robots_blue = n_robots_blue
        self.n_robots_yellow = n_robots_yellow

        # Get field dimensions
        self.field_type = field_type
        self.field = self.rsim.get_field_params()
        # Largest absolute coordinate reachable on the field; used to
        # normalize positions.
        self.max_pos = max(self.field.width / 2, (self.field.length / 2)
                           + self.field.penalty_length)
        # Top linear speed from the motor's max RPM and the wheel radius.
        max_wheel_rad_s = (self.field.rbt_motor_max_rpm / 60) * 2 * np.pi
        self.max_v = max_wheel_rad_s * self.field.rbt_wheel_radius
        # 0.04 = robot radius (0.0375) + wheel thickness (0.0025)
        self.max_w = np.rad2deg(self.max_v / 0.04)

        # Initiate
        self.frame: Frame = None
        self.last_frame: Frame = None
        self.view = None
        self.steps = 0
        self.sent_commands = None

    def step(self, action):
        """Advances the simulation one time step with the given action.

        Returns the standard gym `(observation, reward, done, info)` tuple;
        `info` is always an empty dict.
        """
        self.steps += 1
        # Join agent action with environment actions
        commands: List[Robot] = self._get_commands(action)
        # Send command to simulator
        self.rsim.send_commands(commands)
        self.sent_commands = commands

        # Get Frame from simulator
        self.last_frame = self.frame
        self.frame = self.rsim.get_frame()

        # Calculate environment observation, reward and done condition
        observation = self._frame_to_observations()
        reward, done = self._calculate_reward_and_done()

        return observation, reward, done, {}

    def reset(self):
        """Resets the simulator to initial positions and returns the first
        observation."""
        self.steps = 0
        self.last_frame = None
        self.sent_commands = None

        # Close render window
        del(self.view)
        self.view = None

        initial_pos_frame: Frame = self._get_initial_positions_frame()
        self.rsim.reset(initial_pos_frame)

        # Get frame from simulator
        self.frame = self.rsim.get_frame()

        return self._frame_to_observations()

    def render(self, mode='human') -> None:
        '''
        Renders the current frame (ball and robot positions).

        Lazily creates the render window on first call. With
        mode="rgb_array" the rendered frame is returned as an array
        instead of (only) being drawn on screen.
        '''
        if self.view == None:
            from rsoccer_gym.Render import RCGymRender
            self.view = RCGymRender(self.n_robots_blue,
                                    self.n_robots_yellow,
                                    self.field,
                                    simulator='vss')

        return self.view.render_frame(self.frame, return_rgb_array=mode == "rgb_array")

    def close(self):
        """Stops the underlying simulator."""
        self.rsim.stop()

    def _get_commands(self, action):
        '''returns a list of commands of type List[Robot] from type action_space action'''
        raise NotImplementedError

    def _frame_to_observations(self):
        '''returns a type observation_space observation from a type List[Robot] state'''
        raise NotImplementedError

    def _calculate_reward_and_done(self):
        '''returns reward value and done flag from type List[Robot] state'''
        raise NotImplementedError

    def _get_initial_positions_frame(self) -> Frame:
        '''returns frame with robots initial positions'''
        raise NotImplementedError

    def norm_pos(self, pos):
        """Normalizes a position by the field's max coordinate, clipped."""
        return np.clip(
            pos / self.max_pos,
            -self.NORM_BOUNDS,
            self.NORM_BOUNDS
        )

    def norm_v(self, v):
        """Normalizes a linear velocity by the robot's max speed, clipped."""
        return np.clip(
            v / self.max_v,
            -self.NORM_BOUNDS,
            self.NORM_BOUNDS
        )

    def norm_w(self, w):
        """Normalizes an angular velocity by the robot's max spin, clipped."""
        return np.clip(
            w / self.max_w,
            -self.NORM_BOUNDS,
            self.NORM_BOUNDS
        )
class VSSBaseFIRAEnv(VSSBaseEnv):
    # Variant of VSSBaseEnv backed by the FIRA simulator instead of rSim.
    def __init__(self, field_type: int,
                 n_robots_blue: int, n_robots_yellow: int, time_step: float):
        """Builds the base env, then swaps the simulator for a Fira client.

        NOTE(review): super().__init__ instantiates an RSimVSS (also used to
        read field parameters) which is then discarded here in favor of
        Fira() — confirm this double initialization is intended.
        """
        super().__init__(field_type, n_robots_blue, n_robots_yellow, time_step)
        self.rsim = Fira()
import math
import os
import random
import time
import gym
import numpy as np
import torch
from rsoccer_gym.Entities import Frame, Robot
from rsoccer_gym.vss.vss_gym_base import VSSBaseEnv
from rsoccer_gym.vss.env_gk.attacker.models import DDPGActor, GaussianPolicy
class rSimVSSGK(VSSBaseEnv):
"""
Description:
This environment controls a single robot football goalkeeper against an attacker in the VSS League 3v3 match
robots_blue[0] -> Goalkeeper
robots_yellow[0] -> Attacker
Observation:
Type: Box(40)
Goalkeeper:
Num Observation normalized
0 Ball X
1 Ball Y
2 Ball Vx
3 Ball Vy
4 + (7 * i) id i Blue Robot X
5 + (7 * i) id i Blue Robot Y
6 + (7 * i) id i Blue Robot sin(theta)
7 + (7 * i) id i Blue Robot cos(theta)
8 + (7 * i) id i Blue Robot Vx
9 + (7 * i) id i Blue Robot Vy
10 + (7 * i) id i Blue Robot v_theta
25 + (5 * i) id i Yellow Robot X
26 + (5 * i) id i Yellow Robot Y
27 + (5 * i) id i Yellow Robot Vx
28 + (5 * i) id i Yellow Robot Vy
29 + (5 * i) id i Yellow Robot v_theta
Attacker:
Num Observation normalized
0 Ball X
1 Ball Y
2 Ball Vx
3 Ball Vy
4 + (7 * i) id i Yellow Robot -X
5 + (7 * i) id i Yellow Robot Y
6 + (7 * i) id i Yellow Robot sin(theta)
7 + (7 * i) id i Yellow Robot -cos(theta)
8 + (7 * i) id i Yellow Robot -Vx
9 + (7 * i) id i Yellow Robot Vy
10 + (7 * i) id i Yellow Robot -v_theta
25 + (5 * i) id i Blue Robot -X
26 + (5 * i) id i Blue Robot Y
27 + (5 * i) id i Blue Robot -Vx
28 + (5 * i) id i Blue Robot Vy
29 + (5 * i) id i Blue Robot -v_theta
Actions:
Type: Box(2, )
Num Action
0 id 0 Blue Robot Wheel 1 Speed (%)
1 id 0 Blue Robot Wheel 2 Speed (%)
Reward:
Sum Of Rewards:
Defense
Ball leaves the goalkeeper's area
Move to Ball_Y
Distance From The Goalkeeper to Your Goal Bar
Penalized By:
Goalkeeper leaves the goalkeeper's area
Starting State:
Random Ball Position
Random Attacker Position
Random Goalkeeper Position Inside the Goalkeeper's Area
Episode Termination:
Attacker Goal
Goalkeeper leaves the goalkeeper's area
Ball leaves the goalkeeper's area
"""
atk_target_rho = 0
atk_target_theta = 0
atk_target_x = 0
atk_target_y = 0
def __init__(self):
    """Build the 3v3 goalkeeper environment (blue id 0 is the agent)."""
    super().__init__(field_type=0, n_robots_blue=3, n_robots_yellow=3,
                     time_step=0.025)
    # Two continuous wheel commands in [-1, 1].
    self.action_space = gym.spaces.Box(low=-1, high=1, shape=(2,),
                                       dtype=np.float32)
    # 40-dimensional normalized observation vector.
    self.observation_space = gym.spaces.Box(low=-1, high=1, shape=(40,),
                                            dtype=np.float32)
    # Per-episode bookkeeping.
    self.last_frame = None
    self.energy_penalty = 0
    self.reward_shaping_total = None
    self.attacker = None
    self.previous_ball_direction = []
    self.isInside = False
    self.ballInsideArea = False
    # Load the pretrained opponent policy.
    self.load_atk()
    print('Environment initialized')
def step(self, action):
    """Run one simulator step; info carries the shaping breakdown."""
    obs, rew, done, _ = super().step(action)
    return obs, rew, done, self.reward_shaping_total
def load_atk(self):
    """Load the pretrained DDPG attacker policy from disk.

    The checkpoint is mapped onto the GPU when one is available and
    onto the CPU otherwise; the original unconditionally required
    CUDA and crashed on CPU-only machines.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    atk_path = os.path.dirname(os.path.realpath(
        __file__)) + '/attacker/atk_model.pth'
    self.attacker = DDPGActor(40, 2)
    print(atk_path)
    atk_checkpoint = torch.load(atk_path, map_location=device)
    self.attacker.load_state_dict(atk_checkpoint['state_dict_act'])
    self.attacker.eval()
def _atk_obs(self):
    """Build the attacker's observation, mirrored on the X axis.

    The attacker network was trained shooting towards +x, so every x
    coordinate, x velocity, cos(theta) and angular velocity is negated
    to make it attack the goal the goalkeeper defends.
    """
    frame = self.frame
    obs = [
        self.norm_pos(-frame.ball.x),
        self.norm_pos(frame.ball.y),
        self.norm_v(-frame.ball.v_x),
        self.norm_v(frame.ball.v_y),
    ]
    for i in range(self.n_robots_yellow):
        rbt = frame.robots_yellow[i]
        angle = np.deg2rad(rbt.theta)
        obs.extend([
            self.norm_pos(-rbt.x),
            self.norm_pos(rbt.y),
            np.sin(angle),
            -np.cos(angle),
            self.norm_v(-rbt.v_x),
            self.norm_v(rbt.v_y),
            self.norm_w(-rbt.v_theta),
        ])
    for i in range(self.n_robots_blue):
        rbt = frame.robots_blue[i]
        obs.extend([
            self.norm_pos(-rbt.x),
            self.norm_pos(rbt.y),
            self.norm_v(-rbt.v_x),
            self.norm_v(rbt.v_y),
            self.norm_w(-rbt.v_theta),
        ])
    return np.array(obs)
def _frame_to_observations(self):
    """Goalkeeper-side observation: ball plus all robots, unmirrored."""
    frame = self.frame
    obs = [self.norm_pos(frame.ball.x),
           self.norm_pos(frame.ball.y),
           self.norm_v(frame.ball.v_x),
           self.norm_v(frame.ball.v_y)]
    # Blue robots contribute 7 features each (pose, orientation, vel).
    for i in range(self.n_robots_blue):
        rbt = frame.robots_blue[i]
        angle = np.deg2rad(rbt.theta)
        obs += [self.norm_pos(rbt.x), self.norm_pos(rbt.y),
                np.sin(angle), np.cos(angle),
                self.norm_v(rbt.v_x), self.norm_v(rbt.v_y),
                self.norm_w(rbt.v_theta)]
    # Yellow robots contribute 5 features each (no orientation).
    for i in range(self.n_robots_yellow):
        rbt = frame.robots_yellow[i]
        obs += [self.norm_pos(rbt.x), self.norm_pos(rbt.y),
                self.norm_v(rbt.v_x), self.norm_v(rbt.v_y),
                self.norm_w(rbt.v_theta)]
    return np.array(obs)
def _get_commands(self, actions):
    """Build the command list for all six robots.

    Blue 0 executes the agent's action, the remaining blue robots get
    Ornstein-Uhlenbeck noise, yellow 0 executes the scripted attacker
    and the remaining yellow robots get noise as well.
    """
    commands = []
    self.energy_penalty = -(abs(actions[0] * 100) + abs(actions[1] * 100))
    v_wheel0, v_wheel1 = self._actions_to_v_wheels(actions)
    commands.append(Robot(yellow=False, id=0, v_wheel0=v_wheel0,
                          v_wheel1=v_wheel1))
    # Random (OU noise) commands for the other blue robots.
    for i in range(1, self.n_robots_blue):
        actions = self.ou_actions[i].sample()
        v_wheel0, v_wheel1 = self._actions_to_v_wheels(actions)
        commands.append(Robot(yellow=False, id=i, v_wheel0=v_wheel0,
                              v_wheel1=v_wheel1))
    atk_action = self.attacker.get_action(self._atk_obs())
    v_wheel0, v_wheel1 = self._actions_to_v_wheels(atk_action)
    # Swap the wheels to undo the attacker's Y-axis reflection.
    commands.append(Robot(yellow=True, id=0, v_wheel0=v_wheel1,
                          v_wheel1=v_wheel0))
    for i in range(1, self.n_robots_yellow):
        actions = self.ou_actions[self.n_robots_blue + i].sample()
        v_wheel0, v_wheel1 = self._actions_to_v_wheels(actions)
        # BUG FIX: these are yellow-team robots; the original sent them
        # with yellow=False, duplicating blue ids instead of commanding
        # the yellow teammates (compare VSSFIRAEnv._get_commands).
        commands.append(Robot(yellow=True, id=i, v_wheel0=v_wheel0,
                              v_wheel1=v_wheel1))
    return commands
def _actions_to_v_wheels(self, actions):
    """Scale actions in [-1, 1] to clipped, dead-zoned wheel speeds
    in rad/s."""
    left = np.clip(actions[0] * self.max_v, -self.max_v, self.max_v)
    right = np.clip(actions[1] * self.max_v, -self.max_v, self.max_v)
    # Suppress tiny commands the real robot cannot execute.
    if -self.v_wheel_deadzone < left < self.v_wheel_deadzone:
        left = 0
    if -self.v_wheel_deadzone < right < self.v_wheel_deadzone:
        right = 0
    # Linear (m/s) -> angular (rad/s) wheel speed.
    return (left / self.field.rbt_wheel_radius,
            right / self.field.rbt_wheel_radius)
def _calculate_future_point(self, pos, vel):
    """Linearly project the position after travelling the distance to
    the goal center; None when not moving towards +x.
    """
    if vel[0] <= 0:
        return None
    goal_center = np.array([self.field_params['field_length'] / 2, 0])
    start = np.array(pos)
    dist = np.linalg.norm(goal_center - start)
    speed = np.sqrt(vel[0] ** 2 + vel[1] ** 2)
    travel_time = dist / speed
    return (start[0] + vel[0] * travel_time,
            start[1] + vel[1] * travel_time)
def __move_reward(self):
    """Reward for moving towards the ball (blue robot 0).

    Dot product of the robot velocity with the unit robot->ball
    vector, rescaled and clipped; zero when the ball is up-field.
    """
    if self.frame.ball.x < self.field_params['field_length'] / 4 - 5:
        ball_pos = np.array([self.frame.ball.x, self.frame.ball.y])
        rbt_pos = np.array([self.frame.robots_blue[0].x,
                            self.frame.robots_blue[0].y])
        rbt_vel = np.array([self.frame.robots_blue[0].v_x,
                            self.frame.robots_blue[0].v_y])
        to_ball = ball_pos - rbt_pos
        to_ball = to_ball / np.linalg.norm(to_ball)
        return np.clip(np.dot(to_ball, rbt_vel) / 0.4, -5.0, 5.0)
    return 0
def __move_reward_y(self):
    """Reward for moving the goalkeeper's Y towards the ball's Y.

    Dot product of the keeper's Y velocity with the unit offset to the
    (goal-mouth-clamped) ball Y, rescaled and clipped.
    """
    target_y = np.array([np.clip(self.frame.ball.y, -0.35, 0.35)])
    keeper_y = np.array([self.frame.robots_blue[0].y])
    keeper_vy = np.array([self.frame.robots_blue[0].v_y])
    offset = target_y - keeper_y
    offset = offset / np.linalg.norm(offset)
    return np.clip(np.dot(offset, keeper_vy) / 0.4, -5.0, 5.0)
def __defended_ball(self):
    """Reward 1 when the goalkeeper deflects the ball.

    When the ball first comes within 8 cm of the goalkeeper, the sign
    of each velocity component is recorded. If the ball later moves in
    a different direction while outside the goal line, the contact is
    counted as a defense.
    """
    eps = 0.000001  # keeps the sign ratio defined when velocity is 0
    gk_pos = np.array([self.frame.robots_blue[0].x,
                       self.frame.robots_blue[0].y])
    ball = np.array([self.frame.ball.x, self.frame.ball.y])
    distance_gk_ball = np.linalg.norm(gk_pos - ball) * 100
    field_half_length = self.field_params['field_length'] / 2
    defense_reward = 0
    if distance_gk_ball < 8 and not self.isInside:
        # Record the +/-1 sign of each velocity component on entry.
        self.previous_ball_direction.append(
            (self.frame.ball.v_x + eps) / (abs(self.frame.ball.v_x) + eps))
        self.previous_ball_direction.append(
            (self.frame.ball.v_y + eps) / (abs(self.frame.ball.v_y) + eps))
        self.isInside = True
    elif self.isInside:
        direction_ball_vx = (self.frame.ball.v_x + eps) / \
            (abs(self.frame.ball.v_x) + eps)
        # BUG FIX: the original divided v_y by abs(v_x), so this was not
        # a +/-1 sign and never matched the value recorded on entry.
        direction_ball_vy = (self.frame.ball.v_y + eps) / \
            (abs(self.frame.ball.v_y) + eps)
        if (self.previous_ball_direction[0] != direction_ball_vx or
                self.previous_ball_direction[1] != direction_ball_vy) and \
                self.frame.ball.x > -field_half_length + 0.1:
            self.isInside = False
            self.previous_ball_direction.clear()
            defense_reward = 1
    return defense_reward
def __ball_grad(self):
    """Clipped per-step change of the ball's field potential.

    The potential combines the distances to both goals (in cm),
    normalized to [-1, 0]; its change over time_step rewards pushing
    the ball towards the opponent goal.
    """
    field_len = self.field_params['field_length']
    length_cm = field_len * 100
    goal_line = field_len / 2.0 + self.field_params['goal_depth']
    to_defence = (goal_line + self.frame.ball.x) * 100
    to_attack = (goal_line - self.frame.ball.x) * 100
    dy_cm = self.frame.ball.y * 100
    potential = ((-math.sqrt(to_attack ** 2 + 2 * dy_cm ** 2)
                  + math.sqrt(to_defence ** 2 + 2 * dy_cm ** 2))
                 / length_cm - 1) / 2
    gradient = 0
    if self.previous_ball_potential is not None:
        delta = potential - self.previous_ball_potential
        gradient = np.clip(delta * 3 / self.time_step, -5.0, 5.0)
    self.previous_ball_potential = potential
    return gradient
def _calculate_reward_and_done(self):
    """Compute the goalkeeper's shaped reward and the episode-done flag.

    Returns
    -------
    (float, bool)
        Reward for this step and whether the episode terminated
        (goalkeeper left its area, ball cleared the area, or a goal).
    """
    done = False
    reward = 0
    goal_score = 0
    move_reward = 0
    ball_potential = 0
    move_y_reward = 0
    dist_robot_own_goal_bar = 0
    ball_defense_reward = 0
    ball_leave_area_reward = 0
    # Weights of the individual shaping terms.
    w_defense = 1.8
    w_move = 0.2
    w_ball_pot = 0.1
    w_move_y = 0.3
    w_distance = 0.1
    w_blva = 2.0
    if self.reward_shaping_total is None:
        self.reward_shaping_total = {'goal_score': 0, 'move': 0,
                                     'ball_grad': 0, 'energy': 0,
                                     'goals_blue': 0, 'goals_yellow': 0,
                                     'defense': 0, 'ball_leave_area': 0,
                                     'move_y': 0, 'distance_own_goal_bar': 0}
    # Hard failure: the goalkeeper left its area.
    if self.frame.robots_blue[0].x > -0.63 or self.frame.robots_blue[0].y > 0.4 \
            or self.frame.robots_blue[0].y < -0.4:
        reward = -5
        done = True
        self.isInside = False
        self.ballInsideArea = False
    elif self.last_frame is not None:
        # NOTE(review): resetting the potential every step disables the
        # ball-potential gradient term below (ball_potential stays 0) --
        # confirm this is intended.
        self.previous_ball_potential = None
        # Ball entered the goalkeeper area.
        if (not self.ballInsideArea) and self.frame.ball.x < -0.6 and (self.frame.ball.y < 0.35
                and self.frame.ball.y > -0.35):
            self.ballInsideArea = True
        # Ball entered and then left the area: successful clearance.
        if self.ballInsideArea and (self.frame.ball.x > -0.6 or self.frame.ball.y > 0.35
                or self.frame.ball.y < -0.35):
            ball_leave_area_reward = 1
            self.ballInsideArea = False
            done = True
        # Opponent scored.
        if self.frame.ball.x < -(self.field_params['field_length'] / 2):
            self.reward_shaping_total['goals_yellow'] += 1
            self.reward_shaping_total['goal_score'] -= 1
            goal_score = -2
            self.ballInsideArea = False
        if goal_score != 0:
            reward = goal_score
        else:
            move_reward = self.__move_reward()
            move_y_reward = self.__move_reward_y()
            ball_defense_reward = self.__defended_ball()
            dist_robot_own_goal_bar = -self.field_params['field_length'] / \
                2 + 0.15 - self.frame.robots_blue[0].x
            # NOTE(review): move_reward and ball_potential feed only the
            # logging totals, not the reward itself -- presumably on
            # purpose for the goalkeeper task; verify.
            reward = w_move_y * move_y_reward + \
                w_distance * dist_robot_own_goal_bar + \
                w_defense * ball_defense_reward + \
                w_blva * ball_leave_area_reward
            self.reward_shaping_total['move'] += w_move * move_reward
            self.reward_shaping_total['move_y'] += w_move_y * move_y_reward
            self.reward_shaping_total['ball_grad'] += w_ball_pot * ball_potential
            self.reward_shaping_total['distance_own_goal_bar'] += w_distance * dist_robot_own_goal_bar
            self.reward_shaping_total['defense'] += ball_defense_reward * w_defense
            self.reward_shaping_total['ball_leave_area'] += w_blva * ball_leave_area_reward
    self.last_frame = self.frame
    done = goal_score != 0 or done
    return reward, done
def _get_initial_positions_frame(self):
    """Initial placement: goalkeeper centered on its goal mouth, ball
    and every other robot at random positions inside the field.
    """
    half_len = self.field_params['field_length'] / 2
    half_wid = self.field_params['field_width'] / 2

    def rand_x():
        return random.uniform(-half_len + 0.1, half_len - 0.1)

    def rand_y():
        return random.uniform(-half_wid + 0.1, half_wid - 0.1)

    pos_frame: Frame = Frame()
    pos_frame.ball.x = rand_x()
    pos_frame.ball.y = rand_y()
    # Goalkeeper glued to its own goal bar, facing the field.
    pos_frame.robots_blue[0] = Robot(x=-half_len + 0.05, y=0.0, theta=0)
    pos_frame.robots_blue[1] = Robot(x=rand_x(), y=rand_y(), theta=0)
    pos_frame.robots_blue[2] = Robot(x=rand_x(), y=rand_y(), theta=0)
    for i in range(3):
        pos_frame.robots_yellow[i] = Robot(x=rand_x(), y=rand_y(),
                                           theta=math.pi)
    return pos_frame
import math
import random
from rsoccer_gym.Utils.Utils import OrnsteinUhlenbeckAction
from typing import Dict
import gym
import numpy as np
from rsoccer_gym.Entities import Frame, Robot
from rsoccer_gym.vss.vss_gym_base import VSSBaseFIRAEnv
class VSSFIRAEnv(VSSBaseFIRAEnv):
"""This environment controls a single robot in a VSS soccer League 3v3 match
Description:
Observation:
Type: Box(40)
Normalized Bounds to [-1.25, 1.25]
Num Observation normalized
0 Ball X
1 Ball Y
2 Ball Vx
3 Ball Vy
4 + (7 * i) id i Blue Robot X
5 + (7 * i) id i Blue Robot Y
6 + (7 * i) id i Blue Robot sin(theta)
7 + (7 * i) id i Blue Robot cos(theta)
8 + (7 * i) id i Blue Robot Vx
9 + (7 * i) id i Blue Robot Vy
10 + (7 * i) id i Blue Robot v_theta
25 + (5 * i) id i Yellow Robot X
26 + (5 * i) id i Yellow Robot Y
27 + (5 * i) id i Yellow Robot Vx
28 + (5 * i) id i Yellow Robot Vy
29 + (5 * i) id i Yellow Robot v_theta
Actions:
Type: Box(2, )
Num Action
0 id 0 Blue Left Wheel Speed (%)
1 id 0 Blue Right Wheel Speed (%)
Reward:
Sum of Rewards:
Goal
Ball Potential Gradient
Move to Ball
Energy Penalty
Starting State:
Randomized Robots and Ball initial Position
Episode Termination:
5 minutes match time
"""
def __init__(self):
    """3v3 FIRASim environment; blue robot 0 is the learning agent."""
    super().__init__(field_type=0, n_robots_blue=3, n_robots_yellow=3,
                     time_step=0.025)
    # Per-feature observation bounds: ball (4), blue robots (7 each),
    # yellow robots (5 each); highs are the exact negation of the lows.
    ball_low = [-1.2, -1.2, -1.25, -1.25]
    blue_low = [-1.2, -1.2, -1, -1, -1.25, -1.25, -1.2]
    yellow_low = [-1.2, -1.2, -1.25, -1.25, -1.2]
    low_obs_bound = ball_low + blue_low * 3 + yellow_low * 3
    high_obs_bound = [-bound for bound in low_obs_bound]
    low_obs_bound = np.array(low_obs_bound, dtype=np.float32)
    high_obs_bound = np.array(high_obs_bound, dtype=np.float32)
    self.action_space = gym.spaces.Box(low=-1, high=1,
                                       shape=(2,), dtype=np.float32)
    self.observation_space = gym.spaces.Box(low=low_obs_bound,
                                            high=high_obs_bound,
                                            shape=(40,), dtype=np.float32)
    # Bookkeeping.
    self.previous_ball_potential = None
    self.actions: Dict = None
    self.reward_shaping_total = None
    self.v_wheel_deadzone = 0.05
    # One Ornstein-Uhlenbeck noise process per robot that the agent
    # does not control.
    self.ou_actions = [
        OrnsteinUhlenbeckAction(self.action_space, dt=self.time_step)
        for _ in range(self.n_robots_blue + self.n_robots_yellow)
    ]
    print('Environment initialized')
def reset(self):
    """Reset per-episode bookkeeping, noise processes, and the match.

    Also clears the stored ball potential (as VSSEnv.reset does) so the
    first shaping gradient of a new episode is not computed against a
    stale value from the previous episode.
    """
    self.actions = None
    self.reward_shaping_total = None
    self.previous_ball_potential = None  # consistency fix with VSSEnv
    for ou in self.ou_actions:
        ou.reset()
    return super().reset()
def step(self, action):
    """Advance one step; info is the reward-shaping breakdown."""
    obs, reward, done, _ = super().step(action)
    return obs, reward, done, self.reward_shaping_total
def _frame_to_observations(self):
    """Assemble the 40-d normalized observation vector."""
    frame = self.frame
    ob = [self.norm_pos(frame.ball.x),
          self.norm_pos(frame.ball.y),
          self.norm_v(frame.ball.v_x),
          self.norm_v(frame.ball.v_y)]
    # Blue robots: pose, orientation (sin/cos), velocities.
    for i in range(self.n_robots_blue):
        bot = frame.robots_blue[i]
        angle = np.deg2rad(bot.theta)
        ob += [self.norm_pos(bot.x), self.norm_pos(bot.y),
               np.sin(angle), np.cos(angle),
               self.norm_v(bot.v_x), self.norm_v(bot.v_y),
               self.norm_w(bot.v_theta)]
    # Yellow robots: pose and velocities only.
    for i in range(self.n_robots_yellow):
        bot = frame.robots_yellow[i]
        ob += [self.norm_pos(bot.x), self.norm_pos(bot.y),
               self.norm_v(bot.v_x), self.norm_v(bot.v_y),
               self.norm_w(bot.v_theta)]
    return np.array(ob)
def _get_commands(self, actions):
    """Agent command for blue 0 plus OU-noise commands for the rest."""
    commands = []
    self.actions = {0: actions}
    v_wheel1, v_wheel2 = self._actions_to_v_wheels(actions)
    commands.append(Robot(yellow=False, id=0, v_wheel1=v_wheel1,
                          v_wheel2=v_wheel2))
    # Remaining blue robots follow their noise processes.
    for i in range(1, self.n_robots_blue):
        sampled = self.ou_actions[i].sample()
        self.actions[i] = sampled
        v_wheel1, v_wheel2 = self._actions_to_v_wheels(sampled)
        commands.append(Robot(yellow=False, id=i, v_wheel1=v_wheel1,
                              v_wheel2=v_wheel2))
    # All yellow robots follow their noise processes.
    for i in range(self.n_robots_yellow):
        sampled = self.ou_actions[self.n_robots_blue + i].sample()
        v_wheel1, v_wheel2 = self._actions_to_v_wheels(sampled)
        commands.append(Robot(yellow=True, id=i, v_wheel1=v_wheel1,
                              v_wheel2=v_wheel2))
    return commands
def __ball_grad(self):
    """Clipped per-step change of the ball's field potential.

    Potential combines distances to both goals (cm), normalized to
    [-1, 0]; the clipped delta over time_step is the shaping term.
    """
    field_len = self.field_params['field_length']
    length_cm = field_len * 100
    goal_line = field_len / 2.0 + self.field_params['goal_depth']
    to_defence = (goal_line + self.frame.ball.x) * 100
    to_attack = (goal_line - self.frame.ball.x) * 100
    dy_cm = self.frame.ball.y * 100
    potential = ((-math.sqrt(to_attack ** 2 + 2 * dy_cm ** 2)
                  + math.sqrt(to_defence ** 2 + 2 * dy_cm ** 2))
                 / length_cm - 1) / 2
    gradient = 0
    if self.previous_ball_potential is not None:
        delta = potential - self.previous_ball_potential
        gradient = np.clip(delta * 3 / self.time_step, -1.0, 1.0)
    self.previous_ball_potential = potential
    return gradient
def __move_reward(self):
    """Dot of the agent's velocity with the unit robot->ball vector,
    rescaled and clipped to [-1, 1]."""
    ball_pos = np.array([self.frame.ball.x, self.frame.ball.y])
    agent_pos = np.array([self.frame.robots_blue[0].x,
                          self.frame.robots_blue[0].y])
    agent_vel = np.array([self.frame.robots_blue[0].v_x,
                          self.frame.robots_blue[0].v_y])
    heading = ball_pos - agent_pos
    heading = heading / np.linalg.norm(heading)
    return np.clip(np.dot(heading, agent_vel) / 0.4, -1.0, 1.0)
def __energy_penalty(self):
    """Negative sum of the agent's absolute wheel commands, converted
    via the wheel radius."""
    cmd = self.sent_commands[0]
    penalty = -(abs(cmd.v_wheel1) + abs(cmd.v_wheel2))
    return penalty / self.rsim.robot_wheel_radius
def _calculate_reward_and_done(self):
    """Compute the shaped reward and the done value for this step.

    Reward is +/-10 on a goal, otherwise a weighted sum of the
    move-to-ball, ball-potential-gradient and energy shaping terms.
    """
    reward = 0
    goal = False
    # Shaping weights.
    w_move = 0.2
    w_ball_grad = 0.8
    w_energy = 2e-4
    if self.reward_shaping_total is None:
        self.reward_shaping_total = {'goal_score': 0, 'move': 0,
                                     'ball_grad': 0, 'energy': 0,
                                     'goals_blue': 0, 'goals_yellow': 0}
    # Check if goal ocurred
    if self.frame.ball.x > (self.field_params['field_length'] / 2):
        self.reward_shaping_total['goal_score'] += 1
        self.reward_shaping_total['goals_blue'] += 1
        reward = 10
        goal = True
    elif self.frame.ball.x < -(self.field_params['field_length'] / 2):
        self.reward_shaping_total['goal_score'] -= 1
        self.reward_shaping_total['goals_yellow'] += 1
        reward = -10
        goal = True
    else:
        if self.last_frame is not None:
            # Calculate ball potential
            grad_ball_potential = self.__ball_grad()
            # Calculate Move ball
            move_reward = self.__move_reward()
            # Calculate Energy penalty
            energy_penalty = self.__energy_penalty()
            reward = w_move * move_reward + \
                w_ball_grad * grad_ball_potential + \
                w_energy * energy_penalty
            self.reward_shaping_total['move'] += w_move * move_reward
            self.reward_shaping_total['ball_grad'] += w_ball_grad \
                * grad_ball_potential
            self.reward_shaping_total['energy'] += w_energy \
                * energy_penalty
    if goal:
        # After a goal, re-place everyone and fetch a fresh frame.
        initial_pos_frame: Frame = self._get_initial_positions_frame()
        self.rsim.reset(initial_pos_frame)
        self.frame = self.rsim.get_frame()
        self.last_frame = None
    # NOTE(review): `done` is the raw step counter (truthy after the
    # first step), not a boolean time-limit check -- confirm the intended
    # termination condition (class docstring says a 5-minute match).
    done = self.steps
    return reward, done
def _get_initial_positions_frame(self):
    """Return a Frame with randomized, non-overlapping initial
    positions for the ball and all six robots.
    """
    field_half_length = self.field_params['field_length'] / 2
    field_half_width = self.field_params['field_width'] / 2

    def x(): return random.uniform(-field_half_length + 0.1,
                                   field_half_length - 0.1)

    def y(): return random.uniform(-field_half_width + 0.1,
                                   field_half_width - 0.1)

    def theta(): return random.uniform(-180, 180)
    pos_frame: Frame = Frame()
    pos_frame.ball.x = x()
    pos_frame.ball.y = y()
    pos_frame.ball.v_x = 0.
    pos_frame.ball.v_y = 0.
    agents = []
    for i in range(self.n_robots_blue):
        pos_frame.robots_blue[i] = Robot(x=x(), y=y(), theta=theta())
        agents.append(pos_frame.robots_blue[i])
    for i in range(self.n_robots_yellow):
        pos_frame.robots_yellow[i] = Robot(x=x(), y=y(), theta=theta())
        # BUG FIX: the original appended robots_blue[i] here, so the
        # yellow robots were never collision-checked and agents[3:]
        # aliased the blue robots.
        agents.append(pos_frame.robots_yellow[i])

    def same_position_ref(x, y, x_ref, y_ref, radius):
        # Axis-aligned square overlap test around (x_ref, y_ref).
        if x >= x_ref - radius and x <= x_ref + radius and \
                y >= y_ref - radius and y <= y_ref + radius:
            return True
        return False

    radius_ball = 0.2
    radius_robot = 0.2
    same_pos = True
    # Keep re-drawing positions until no robot overlaps the ball or
    # any other robot.
    while same_pos:
        for i in range(len(agents)):
            same_pos = False
            while same_position_ref(agents[i].x, agents[i].y,
                                    pos_frame.ball.x, pos_frame.ball.y,
                                    radius_ball):
                agents[i] = Robot(x=x(), y=y(), theta=theta())
                same_pos = True
            for j in range(i + 1, len(agents)):
                while same_position_ref(agents[i].x, agents[i].y,
                                        agents[j].x, agents[j].y,
                                        radius_robot):
                    agents[i] = Robot(x=x(), y=y(), theta=theta())
                    same_pos = True
    pos_frame.robots_blue[0] = agents[0]
    pos_frame.robots_blue[1] = agents[1]
    pos_frame.robots_blue[2] = agents[2]
    pos_frame.robots_yellow[0] = agents[3]
    pos_frame.robots_yellow[1] = agents[4]
    pos_frame.robots_yellow[2] = agents[5]
    return pos_frame
def _actions_to_v_wheels(self, actions):
    """Scale normalized actions to simulator linear speeds with
    clipping and a small dead zone."""
    max_speed = self.rsim.linear_speed_range
    left, right = np.clip(
        (actions[0] * max_speed, actions[1] * max_speed),
        -max_speed, max_speed)
    # Suppress commands the real robot cannot execute.
    if abs(left) < self.v_wheel_deadzone:
        left = 0
    if abs(right) < self.v_wheel_deadzone:
        right = 0
    return left, right
import math
import random
from rsoccer_gym.Utils.Utils import OrnsteinUhlenbeckAction
from typing import Dict
import gym
import numpy as np
from rsoccer_gym.Entities import Frame, Robot, Ball
from rsoccer_gym.vss.vss_gym_base import VSSBaseEnv
from rsoccer_gym.Utils import KDTree
class VSSEnv(VSSBaseEnv):
"""This environment controls a single robot in a VSS soccer League 3v3 match
Description:
Observation:
Type: Box(40)
Normalized Bounds to [-1.25, 1.25]
Num Observation normalized
0 Ball X
1 Ball Y
2 Ball Vx
3 Ball Vy
4 + (7 * i) id i Blue Robot X
5 + (7 * i) id i Blue Robot Y
6 + (7 * i) id i Blue Robot sin(theta)
7 + (7 * i) id i Blue Robot cos(theta)
8 + (7 * i) id i Blue Robot Vx
9 + (7 * i) id i Blue Robot Vy
10 + (7 * i) id i Blue Robot v_theta
25 + (5 * i) id i Yellow Robot X
26 + (5 * i) id i Yellow Robot Y
27 + (5 * i) id i Yellow Robot Vx
28 + (5 * i) id i Yellow Robot Vy
29 + (5 * i) id i Yellow Robot v_theta
Actions:
Type: Box(2, )
Num Action
0 id 0 Blue Left Wheel Speed (%)
1 id 0 Blue Right Wheel Speed (%)
Reward:
Sum of Rewards:
Goal
Ball Potential Gradient
Move to Ball
Energy Penalty
Starting State:
Randomized Robots and Ball initial Position
Episode Termination:
5 minutes match time
"""
def __init__(self):
    """3v3 RSim environment; blue robot 0 is the learning agent."""
    super().__init__(field_type=0, n_robots_blue=3, n_robots_yellow=3,
                     time_step=0.025)
    self.action_space = gym.spaces.Box(low=-1, high=1,
                                       shape=(2,), dtype=np.float32)
    self.observation_space = gym.spaces.Box(low=-self.NORM_BOUNDS,
                                            high=self.NORM_BOUNDS,
                                            shape=(40,),
                                            dtype=np.float32)
    # Bookkeeping.
    self.previous_ball_potential = None
    self.actions: Dict = None
    self.reward_shaping_total = None
    self.v_wheel_deadzone = 0.05
    # One OU noise process per robot the agent does not control.
    self.ou_actions = [
        OrnsteinUhlenbeckAction(self.action_space, dt=self.time_step)
        for _ in range(self.n_robots_blue + self.n_robots_yellow)
    ]
    print('Environment initialized')
def reset(self):
    """Clear per-episode state and noise, then restart the match."""
    self.actions = None
    self.reward_shaping_total = None
    self.previous_ball_potential = None
    for noise in self.ou_actions:
        noise.reset()
    return super().reset()
def step(self, action):
    """Step the match; the info slot holds the shaping breakdown."""
    state, rew, finished, _ = super().step(action)
    return state, rew, finished, self.reward_shaping_total
def _frame_to_observations(self):
    """Assemble the 40-d normalized observation (float32)."""
    frame = self.frame
    obs = [self.norm_pos(frame.ball.x),
           self.norm_pos(frame.ball.y),
           self.norm_v(frame.ball.v_x),
           self.norm_v(frame.ball.v_y)]
    # Blue robots: 7 features each (pose, sin/cos theta, velocities).
    for i in range(self.n_robots_blue):
        rbt = frame.robots_blue[i]
        angle = np.deg2rad(rbt.theta)
        obs += [self.norm_pos(rbt.x), self.norm_pos(rbt.y),
                np.sin(angle), np.cos(angle),
                self.norm_v(rbt.v_x), self.norm_v(rbt.v_y),
                self.norm_w(rbt.v_theta)]
    # Yellow robots: 5 features each (no orientation).
    for i in range(self.n_robots_yellow):
        rbt = frame.robots_yellow[i]
        obs += [self.norm_pos(rbt.x), self.norm_pos(rbt.y),
                self.norm_v(rbt.v_x), self.norm_v(rbt.v_y),
                self.norm_w(rbt.v_theta)]
    return np.array(obs, dtype=np.float32)
def _get_commands(self, actions):
    """Agent command for blue 0; OU-noise commands for everyone else."""
    commands = []
    self.actions = {0: actions}
    v_wheel0, v_wheel1 = self._actions_to_v_wheels(actions)
    commands.append(Robot(yellow=False, id=0, v_wheel0=v_wheel0,
                          v_wheel1=v_wheel1))
    for i in range(1, self.n_robots_blue):
        sampled = self.ou_actions[i].sample()
        self.actions[i] = sampled
        v_wheel0, v_wheel1 = self._actions_to_v_wheels(sampled)
        commands.append(Robot(yellow=False, id=i, v_wheel0=v_wheel0,
                              v_wheel1=v_wheel1))
    for i in range(self.n_robots_yellow):
        sampled = self.ou_actions[self.n_robots_blue + i].sample()
        v_wheel0, v_wheel1 = self._actions_to_v_wheels(sampled)
        commands.append(Robot(yellow=True, id=i, v_wheel0=v_wheel0,
                              v_wheel1=v_wheel1))
    return commands
def _calculate_reward_and_done(self):
    """Return (reward, done); done is True exactly when a goal occurs.

    Reward is +/-10 on a goal, otherwise a weighted sum of the
    move-to-ball, ball-potential-gradient and energy shaping terms.
    """
    reward = 0
    goal = False
    # Shaping weights.
    w_move = 0.2
    w_ball_grad = 0.8
    w_energy = 2e-4
    if self.reward_shaping_total is None:
        self.reward_shaping_total = {'goal_score': 0, 'move': 0,
                                     'ball_grad': 0, 'energy': 0,
                                     'goals_blue': 0, 'goals_yellow': 0}
    # Check if goal ocurred
    if self.frame.ball.x > (self.field.length / 2):
        self.reward_shaping_total['goal_score'] += 1
        self.reward_shaping_total['goals_blue'] += 1
        reward = 10
        goal = True
    elif self.frame.ball.x < -(self.field.length / 2):
        self.reward_shaping_total['goal_score'] -= 1
        self.reward_shaping_total['goals_yellow'] += 1
        reward = -10
        goal = True
    else:
        if self.last_frame is not None:
            # Calculate ball potential
            grad_ball_potential = self.__ball_grad()
            # Calculate Move ball
            move_reward = self.__move_reward()
            # Calculate Energy penalty
            energy_penalty = self.__energy_penalty()
            reward = w_move * move_reward + \
                w_ball_grad * grad_ball_potential + \
                w_energy * energy_penalty
            self.reward_shaping_total['move'] += w_move * move_reward
            self.reward_shaping_total['ball_grad'] += w_ball_grad \
                * grad_ball_potential
            self.reward_shaping_total['energy'] += w_energy \
                * energy_penalty
    return reward, goal
def _get_initial_positions_frame(self):
    """Sample non-overlapping random poses for the ball and robots,
    using a KD-tree of already-placed objects."""
    half_len = self.field.length / 2
    half_wid = self.field.width / 2

    def rand_x(): return random.uniform(-half_len + 0.1, half_len - 0.1)

    def rand_y(): return random.uniform(-half_wid + 0.1, half_wid - 0.1)

    def rand_theta(): return random.uniform(0, 360)

    pos_frame: Frame = Frame()
    pos_frame.ball = Ball(x=rand_x(), y=rand_y())
    min_dist = 0.1
    occupied = KDTree()
    occupied.insert((pos_frame.ball.x, pos_frame.ball.y))

    def free_spot():
        # Re-sample until the new spot clears every placed object.
        spot = (rand_x(), rand_y())
        while occupied.get_nearest(spot)[1] < min_dist:
            spot = (rand_x(), rand_y())
        occupied.insert(spot)
        return spot

    for i in range(self.n_robots_blue):
        sx, sy = free_spot()
        pos_frame.robots_blue[i] = Robot(x=sx, y=sy, theta=rand_theta())
    for i in range(self.n_robots_yellow):
        sx, sy = free_spot()
        pos_frame.robots_yellow[i] = Robot(x=sx, y=sy, theta=rand_theta())
    return pos_frame
def _actions_to_v_wheels(self, actions):
    """Normalized actions -> clipped, dead-zoned wheel speeds (rad/s)."""
    left, right = np.clip(
        (actions[0] * self.max_v, actions[1] * self.max_v),
        -self.max_v, self.max_v)
    # Suppress commands too small for the hardware to execute.
    if abs(left) < self.v_wheel_deadzone:
        left = 0
    if abs(right) < self.v_wheel_deadzone:
        right = 0
    # Linear (m/s) -> angular (rad/s) wheel speed.
    return (left / self.field.rbt_wheel_radius,
            right / self.field.rbt_wheel_radius)
def __ball_grad(self):
    """Clipped per-step change of the ball's field potential.

    Potential combines distances to both goals (cm), normalized to
    [-1, 0]; the clipped delta over time_step is the shaping term.
    """
    field_len = self.field.length
    length_cm = field_len * 100
    goal_line = field_len / 2.0 + self.field.goal_depth
    to_defence = (goal_line + self.frame.ball.x) * 100
    to_attack = (goal_line - self.frame.ball.x) * 100
    dy_cm = self.frame.ball.y * 100
    potential = ((-math.sqrt(to_attack ** 2 + 2 * dy_cm ** 2)
                  + math.sqrt(to_defence ** 2 + 2 * dy_cm ** 2))
                 / length_cm - 1) / 2
    gradient = 0
    if self.previous_ball_potential is not None:
        delta = potential - self.previous_ball_potential
        gradient = np.clip(delta * 3 / self.time_step, -5.0, 5.0)
    self.previous_ball_potential = potential
    return gradient
def __move_reward(self):
    '''Calculate Move to ball reward

    Cosine between the robot vel vector and the vector robot -> ball.
    This indicates rather the robot is moving towards the ball or not.
    '''
    ball = np.array([self.frame.ball.x, self.frame.ball.y])
    robot = np.array([self.frame.robots_blue[0].x,
                      self.frame.robots_blue[0].y])
    robot_vel = np.array([self.frame.robots_blue[0].v_x,
                          self.frame.robots_blue[0].v_y])
    robot_ball = ball - robot
    # Unit vector pointing from the robot to the ball
    # NOTE(review): divides by zero if robot and ball coincide — confirm
    # this cannot happen in practice.
    robot_ball = robot_ball/np.linalg.norm(robot_ball)

    # Projection of the robot velocity onto the robot->ball direction,
    # scaled by 0.4 and clipped to keep the reward term bounded
    move_reward = np.dot(robot_ball, robot_vel)

    move_reward = np.clip(move_reward / 0.4, -5.0, 5.0)
    return move_reward
def __energy_penalty(self):
'''Calculates the energy penalty'''
en_penalty_1 = abs(self.sent_commands[0].v_wheel0)
en_penalty_2 = abs(self.sent_commands[0].v_wheel1)
energy_penalty = - (en_penalty_1 + en_penalty_2)
return energy_penalty | /rsoccer_gym-1.4-py3-none-any.whl/rsoccer_gym/vss/env_vss/vss_gym.py | 0.687945 | 0.298274 | vss_gym.py | pypi |
import math
import os
import random
from typing import Dict
import gym
import numpy as np
import torch
from rsoccer_gym.Entities import Frame, Robot, Ball
from rsoccer_gym.Utils.Utils import OrnsteinUhlenbeckAction
from rsoccer_gym.vss.env_ma.opponent.model import DDPGActor
from rsoccer_gym.vss.vss_gym_base import VSSBaseEnv
from rsoccer_gym.Utils import KDTree
class VSSMAEnv(VSSBaseEnv):
    """This environment controls N robots in a VSS soccer League 3v3 match

    Description:
    Observation:
        Type: Box(40)
        Normalized Bounds to [-1.25, 1.25]
        Num             Observation normalized
        0               Ball X
        1               Ball Y
        2               Ball Vx
        3               Ball Vy
        4 + (7 * i)     id i Blue Robot X
        5 + (7 * i)     id i Blue Robot Y
        6 + (7 * i)     id i Blue Robot sin(theta)
        7 + (7 * i)     id i Blue Robot cos(theta)
        8 + (7 * i)     id i Blue Robot Vx
        9 + (7 * i)     id i Blue Robot Vy
        10 + (7 * i)    id i Blue Robot v_theta
        25 + (5 * i)    id i Yellow Robot X
        26 + (5 * i)    id i Yellow Robot Y
        27 + (5 * i)    id i Yellow Robot Vx
        28 + (5 * i)    id i Yellow Robot Vy
        29 + (5 * i)    id i Yellow Robot v_theta
    Actions:
        Type: Box(N, 2)
        For each blue robot in control:
            Num     Action
            0       Left Wheel Speed  (%)
            1       Right Wheel Speed (%)
    Reward:
        Sum of Rewards:
            For all robots:
                Goal
                Ball Potential Gradient
            Individual:
                Move to Ball
                Energy Penalty
    Starting State:
        Randomized Robots and Ball initial Position
    Episode Termination:
        5 minutes match time
    """

    def __init__(self, n_robots_control=3):
        """Create the 3v3 VSS multi-agent env; the first *n_robots_control*
        blue robots take agent actions, the rest act on OU noise."""
        super().__init__(field_type=0, n_robots_blue=3, n_robots_yellow=3,
                         time_step=0.025)

        self.n_robots_control = n_robots_control
        self.action_space = gym.spaces.Box(low=-1,
                                           high=1,
                                           shape=(n_robots_control, 2))
        self.observation_space = gym.spaces.Box(low=-self.NORM_BOUNDS,
                                                high=self.NORM_BOUNDS,
                                                shape=(n_robots_control, 40),
                                                dtype=np.float32)

        # Initialize Class Atributes
        self.previous_ball_potential = None
        self.actions: Dict = None
        self.reward_shaping_total = None
        self.v_wheel_deadzone = 0.05

        # One Ornstein-Uhlenbeck noise source per uncontrolled robot slot
        self.ou_actions = []
        for i in range(self.n_robots_blue + self.n_robots_yellow):
            self.ou_actions.append(
                OrnsteinUhlenbeckAction(self.action_space, dt=self.time_step)
            )

        print('Environment initialized')

    def reset(self):
        """Reset episode bookkeeping, the noise processes and the sim."""
        self.actions = None
        self.reward_shaping_total = None
        self.previous_ball_potential = None
        for ou in self.ou_actions:
            ou.reset()

        return super().reset()

    def step(self, action):
        """Step the simulation, exposing reward-shaping terms as the info
        dict in place of the base env's empty info."""
        observation, reward, done, _ = super().step(action)
        return observation, reward, done, self.reward_shaping_total

    def get_rotated_obs(self):
        """Build per-agent teammate observations: for each controlled robot
        its own state comes first, followed by its teammates' states."""
        robots_dict = dict()
        for i in range(self.n_robots_blue):
            robots_dict[i] = list()
            robots_dict[i].append(self.norm_pos(self.frame.robots_blue[i].x))
            robots_dict[i].append(self.norm_pos(self.frame.robots_blue[i].y))
            robots_dict[i].append(
                np.sin(np.deg2rad(self.frame.robots_blue[i].theta))
            )
            robots_dict[i].append(
                np.cos(np.deg2rad(self.frame.robots_blue[i].theta))
            )
            robots_dict[i].append(self.norm_v(self.frame.robots_blue[i].v_x))
            robots_dict[i].append(self.norm_v(self.frame.robots_blue[i].v_y))
            robots_dict[i].append(self.norm_w(self.frame.robots_blue[i].v_theta))

        rotaded_obs = list()
        for i in range(self.n_robots_control):
            aux_dict = {}
            aux_dict.update(robots_dict)
            rotated = list()
            # Own state first, then the remaining teammates in id order
            rotated = rotated + aux_dict.pop(i)
            teammates = list(aux_dict.values())
            for teammate in teammates:
                rotated = rotated + teammate
            rotaded_obs.append(rotated)

        return rotaded_obs

    def _frame_to_observations(self):
        """Assemble one 40-dim observation vector per controlled robot."""
        observations = list()
        robots = self.get_rotated_obs()
        for idx in range(self.n_robots_control):
            observation = []

            observation.append(self.norm_pos(self.frame.ball.x))
            observation.append(self.norm_pos(self.frame.ball.y))
            observation.append(self.norm_v(self.frame.ball.v_x))
            observation.append(self.norm_v(self.frame.ball.v_y))

            observation += robots[idx]

            for i in range(self.n_robots_yellow):
                observation.append(self.norm_pos(self.frame.robots_yellow[i].x))
                observation.append(self.norm_pos(self.frame.robots_yellow[i].y))
                observation.append(self.norm_v(self.frame.robots_yellow[i].v_x))
                observation.append(self.norm_v(self.frame.robots_yellow[i].v_y))
                observation.append(self.norm_w(self.frame.robots_yellow[i].v_theta))

            observations.append(np.array(observation, dtype=np.float32))

        observations = np.array(observations)
        return observations

    def _get_commands(self, actions):
        """Translate agent actions into wheel commands; uncontrolled blue
        robots and all yellow robots receive OU-noise actions."""
        commands = []
        self.actions = {}

        # Send random commands to the other robots
        for i in range(self.n_robots_control):
            self.actions[i] = actions[i]
            v_wheel0, v_wheel1 = self._actions_to_v_wheels(actions[i])
            commands.append(Robot(yellow=False, id=i, v_wheel0=v_wheel0,
                                  v_wheel1=v_wheel1))
        for i in range(self.n_robots_control, self.n_robots_blue):
            actions = self.ou_actions[i].sample()
            v_wheel0, v_wheel1 = self._actions_to_v_wheels(actions[0])
            commands.append(Robot(yellow=False, id=i, v_wheel0=v_wheel0,
                                  v_wheel1=v_wheel1))
        for i in range(self.n_robots_yellow):
            actions = self.ou_actions[self.n_robots_blue+i].sample()
            v_wheel0, v_wheel1 = self._actions_to_v_wheels(actions[0])
            commands.append(Robot(yellow=True, id=i, v_wheel0=v_wheel0,
                                  v_wheel1=v_wheel1))

        return commands

    def _calculate_reward_and_done(self):
        """Return (reward_dict, done) — a goal ends the episode with ±10 for
        every controlled robot; otherwise each robot gets a weighted sum of
        shaping terms (ball gradient, move-to-ball, energy)."""
        reward = {f'robot_{i}': 0 for i in range(self.n_robots_control)}
        goal = False
        # Shaping-term weights
        w_move = 0.2
        w_ball_grad = 0.8
        w_energy = 2e-4
        if self.reward_shaping_total is None:
            # Lazily create the per-episode shaping accumulators
            self.reward_shaping_total = {'goal_score': 0, 'ball_grad': 0,
                                         'goals_blue': 0, 'goals_yellow': 0}
            for i in range(self.n_robots_control):
                self.reward_shaping_total[f'robot_{i}'] = {
                    'move': 0, 'energy': 0}

        # Check if goal ocurred
        if self.frame.ball.x > (self.field.length / 2):
            self.reward_shaping_total['goal_score'] += 1
            self.reward_shaping_total['goals_blue'] += 1
            for i in range(self.n_robots_control):
                reward[f'robot_{i}'] = 10
            goal = True
        elif self.frame.ball.x < -(self.field.length / 2):
            self.reward_shaping_total['goal_score'] -= 1
            self.reward_shaping_total['goals_yellow'] += 1
            for i in range(self.n_robots_control):
                reward[f'robot_{i}'] = -10
            goal = True
        else:
            if self.last_frame is not None:
                # Calculate ball potential
                grad_ball_potential = self._ball_grad()

                self.reward_shaping_total['ball_grad'] += w_ball_grad * grad_ball_potential  # noqa
                for idx in range(self.n_robots_control):
                    # Calculate Move ball
                    move_reward = self._move_reward(robot_idx=idx)
                    # Calculate Energy penalty
                    energy_penalty = self._energy_penalty(robot_idx=idx)

                    rew = w_ball_grad * grad_ball_potential + \
                        w_move * move_reward + \
                        w_energy * energy_penalty

                    reward[f'robot_{idx}'] += rew
                    self.reward_shaping_total[f'robot_{idx}']['move'] += w_move * move_reward  # noqa
                    self.reward_shaping_total[f'robot_{idx}']['energy'] += w_energy * energy_penalty  # noqa

        return reward, goal

    def _get_initial_positions_frame(self):
        '''Returns the position of each robot and ball for the initial frame'''
        field_half_length = self.field.length / 2
        field_half_width = self.field.width / 2

        def x(): return random.uniform(-field_half_length + 0.1,
                                       field_half_length - 0.1)

        def y(): return random.uniform(-field_half_width + 0.1,
                                       field_half_width - 0.1)

        def theta(): return random.uniform(0, 360)

        pos_frame: Frame = Frame()

        pos_frame.ball = Ball(x=x(), y=y())

        # Resample positions until every entity is at least min_dist apart
        min_dist = 0.1

        places = KDTree()
        places.insert((pos_frame.ball.x, pos_frame.ball.y))

        for i in range(self.n_robots_blue):
            pos = (x(), y())
            while places.get_nearest(pos)[1] < min_dist:
                pos = (x(), y())

            places.insert(pos)
            pos_frame.robots_blue[i] = Robot(x=pos[0], y=pos[1], theta=theta())

        for i in range(self.n_robots_yellow):
            pos = (x(), y())
            while places.get_nearest(pos)[1] < min_dist:
                pos = (x(), y())

            places.insert(pos)
            pos_frame.robots_yellow[i] = Robot(x=pos[0], y=pos[1], theta=theta())

        return pos_frame

    def _actions_to_v_wheels(self, actions):
        """Convert normalized wheel actions [-1, 1] to wheel speeds (rad/s),
        clamping to max_v and zeroing commands inside the deadzone."""
        left_wheel_speed = actions[0] * self.max_v
        right_wheel_speed = actions[1] * self.max_v

        left_wheel_speed, right_wheel_speed = np.clip(
            (left_wheel_speed, right_wheel_speed), -self.max_v, self.max_v
        )

        # Deadzone
        if -self.v_wheel_deadzone < left_wheel_speed < self.v_wheel_deadzone:
            left_wheel_speed = 0

        if -self.v_wheel_deadzone < right_wheel_speed < self.v_wheel_deadzone:
            right_wheel_speed = 0

        # Convert to rad/s
        left_wheel_speed /= self.field.rbt_wheel_radius
        right_wheel_speed /= self.field.rbt_wheel_radius

        return left_wheel_speed , right_wheel_speed

    def _ball_grad(self):
        '''Calculate ball potential gradient
        Difference of potential of the ball in time_step seconds.
        '''
        # Calculate ball potential (distances in centimeters)
        length_cm = self.field.length * 100
        half_lenght = (self.field.length / 2.0)\
            + self.field.goal_depth

        # distance to defence
        dx_d = (half_lenght + self.frame.ball.x) * 100
        # distance to attack
        dx_a = (half_lenght - self.frame.ball.x) * 100
        dy = (self.frame.ball.y) * 100

        dist_1 = -math.sqrt(dx_a ** 2 + 2 * dy ** 2)
        dist_2 = math.sqrt(dx_d ** 2 + 2 * dy ** 2)
        ball_potential = ((dist_1 + dist_2) / length_cm - 1) / 2

        grad_ball_potential = 0
        # Calculate ball potential gradient
        # = actual_potential - previous_potential
        if self.previous_ball_potential is not None:
            diff = ball_potential - self.previous_ball_potential
            grad_ball_potential = np.clip(diff * 3 / self.time_step,
                                          -5.0, 5.0)

        self.previous_ball_potential = ball_potential

        return grad_ball_potential

    def _move_reward(self, robot_idx: int):
        '''Calculate Move to ball reward

        Cosine between the robot vel vector and the vector robot -> ball.
        This indicates rather the robot is moving towards the ball or not.
        '''
        ball = np.array([self.frame.ball.x, self.frame.ball.y])
        robot = np.array([self.frame.robots_blue[robot_idx].x,
                          self.frame.robots_blue[robot_idx].y])
        robot_vel = np.array([self.frame.robots_blue[robot_idx].v_x,
                              self.frame.robots_blue[robot_idx].v_y])
        robot_ball = ball - robot
        # Unit vector robot -> ball
        robot_ball = robot_ball/np.linalg.norm(robot_ball)

        move_reward = np.dot(robot_ball, robot_vel)

        move_reward = np.clip(move_reward / 0.4, -5.0, 5.0)
        return move_reward

    def _energy_penalty(self, robot_idx: int):
        '''Calculates the energy penalty'''
        # Negative sum of the absolute wheel speeds actually commanded
        en_penalty_1 = abs(self.sent_commands[robot_idx].v_wheel0)
        en_penalty_2 = abs(self.sent_commands[robot_idx].v_wheel1)
        energy_penalty = - (en_penalty_1 + en_penalty_2)
        return energy_penalty
class VSSMAOpp(VSSMAEnv):
def __init__(self, n_robots_control=3):
super().__init__(n_robots_control=n_robots_control)
self.load_opp()
def load_opp(self):
device = torch.device('cpu')
atk_path = os.path.dirname(os.path.realpath(__file__))\
+ '/opponent/opp.pth'
self.opp = DDPGActor(40, 2)
atk_checkpoint = torch.load(atk_path, map_location=device)
self.opp.load_state_dict(atk_checkpoint['state_dict_act'])
self.opp.eval()
def _opp_obs(self):
observation = []
observation.append(self.norm_pos(-self.frame.ball.x))
observation.append(self.norm_pos(self.frame.ball.y))
observation.append(self.norm_v(-self.frame.ball.v_x))
observation.append(self.norm_v(self.frame.ball.v_y))
# we reflect the side that the opp is attacking,
# so that he will attack towards the goal where the goalkeeper is
for i in range(self.n_robots_yellow):
observation.append(self.norm_pos(-self.frame.robots_yellow[i].x))
observation.append(self.norm_pos(self.frame.robots_yellow[i].y))
observation.append(
np.sin(np.deg2rad(self.frame.robots_yellow[i].theta))
)
observation.append(
-np.cos(np.deg2rad(self.frame.robots_yellow[i].theta))
)
observation.append(self.norm_v(-self.frame.robots_yellow[i].v_x))
observation.append(self.norm_v(self.frame.robots_yellow[i].v_y))
observation.append(self.norm_w(-self.frame.robots_yellow[i].v_theta))
for i in range(self.n_robots_blue):
observation.append(self.norm_pos(-self.frame.robots_blue[i].x))
observation.append(self.norm_pos(self.frame.robots_blue[i].y))
observation.append(self.norm_v(-self.frame.robots_blue[i].v_x))
observation.append(self.norm_v(self.frame.robots_blue[i].v_y))
observation.append(self.norm_w(-self.frame.robots_blue[i].v_theta))
return np.array(observation, dtype=np.float32)
def _get_commands(self, actions):
commands = []
self.actions = {}
for i in range(self.n_robots_control):
self.actions[i] = actions[i]
v_wheel0, v_wheel1 = self._actions_to_v_wheels(actions[i])
commands.append(Robot(yellow=False, id=i, v_wheel0=v_wheel0,
v_wheel1=v_wheel1))
for i in range(self.n_robots_control, self.n_robots_blue):
actions = self.action_space.sample()
v_wheel0, v_wheel1 = self._actions_to_v_wheels(actions)
commands.append(Robot(yellow=False, id=i, v_wheel0=v_wheel0,
v_wheel1=v_wheel1))
atk_action = self.opp.get_action(self._opp_obs())
v_wheel0, v_wheel1 = self._actions_to_v_wheels(atk_action)
commands.append(Robot(yellow=True, id=0, v_wheel0=v_wheel1,
v_wheel1=v_wheel0))
for i in range(1, self.n_robots_yellow):
actions = self.action_space.sample()
v_wheel0, v_wheel1 = self._actions_to_v_wheels(actions)
commands.append(Robot(yellow=True, id=i, v_wheel0=v_wheel0,
v_wheel1=v_wheel1))
return commands | /rsoccer_gym-1.4-py3-none-any.whl/rsoccer_gym/vss/env_ma/vss_gym_ma.py | 0.659734 | 0.254189 | vss_gym_ma.py | pypi |
import numpy as np
from typing import Dict
from rsoccer_gym.Entities.Ball import Ball
from rsoccer_gym.Entities.Robot import Robot
class Frame:
    """Units: seconds, m, m/s, degrees, degrees/s. Reference is field center."""

    def __init__(self):
        """Init Frame object."""
        # Ball state for this frame
        self.ball: Ball = Ball()
        # Robots keyed by id, one dict per team color
        self.robots_blue: Dict[int, Robot] = {}
        self.robots_yellow: Dict[int, Robot] = {}
class FrameVSS(Frame):
    """Frame filled from the flat VSS simulator state vector."""

    def parse(self, state, n_blues=3, n_yellows=3):
        """Parse the flat state array received from the simulator.

        Layout: 5 ball fields (x, y, z, v_x, v_y) followed by 6 fields per
        robot (x, y, theta, v_x, v_y, v_theta), blue team first, then
        yellow.
        """
        self.ball.x = state[0]
        self.ball.y = state[1]
        self.ball.z = state[2]
        self.ball.v_x = state[3]
        self.ball.v_y = state[4]

        fields_per_robot = 6
        teams = (
            (self.robots_blue, n_blues, 5),
            (self.robots_yellow, n_yellows, 5 + n_blues * fields_per_robot),
        )
        for team, count, offset in teams:
            for i in range(count):
                base = offset + fields_per_robot * i
                robot = Robot()
                robot.id = i
                robot.x = state[base + 0]
                robot.y = state[base + 1]
                robot.theta = state[base + 2]
                robot.v_x = state[base + 3]
                robot.v_y = state[base + 4]
                robot.v_theta = state[base + 5]
                team[robot.id] = robot
class FrameSSL(Frame):
    """Frame filled from the flat SSL simulator state vector."""

    def parse(self, state, n_blues=3, n_yellows=3):
        """Parse the flat state array received from the simulator.

        Layout: 5 ball fields (x, y, z, v_x, v_y) followed by 11 fields
        per robot (x, y, theta, v_x, v_y, v_theta, infrared,
        v_wheel0..v_wheel3), blue team first, then yellow.
        """
        self.ball.x = state[0]
        self.ball.y = state[1]
        self.ball.z = state[2]
        self.ball.v_x = state[3]
        self.ball.v_y = state[4]

        fields_per_robot = 11
        teams = (
            (self.robots_blue, n_blues, 5),
            (self.robots_yellow, n_yellows, 5 + n_blues * fields_per_robot),
        )
        for team, count, offset in teams:
            for i in range(count):
                base = offset + fields_per_robot * i
                robot = Robot()
                robot.id = i
                robot.x = state[base + 0]
                robot.y = state[base + 1]
                robot.theta = state[base + 2]
                robot.v_x = state[base + 3]
                robot.v_y = state[base + 4]
                robot.v_theta = state[base + 5]
                robot.infrared = bool(state[base + 6])
                robot.v_wheel0 = state[base + 7]
                robot.v_wheel1 = state[base + 8]
                robot.v_wheel2 = state[base + 9]
                robot.v_wheel3 = state[base + 10]
                team[robot.id] = robot
class FramePB(Frame):
def parse(self, packet):
"""It parses the state received from grSim in a common state for environment"""
self.ball.x = packet.frame.ball.x
self.ball.y = packet.frame.ball.y
self.ball.v_x = packet.frame.ball.vx
self.ball.v_y = packet.frame.ball.vy
for _robot in packet.frame.robots_blue:
robot = Robot()
robot.id = _robot.robot_id
robot.x = _robot.x
robot.y = _robot.y
robot.theta = np.rad2deg(_robot.orientation)
robot.v_x = _robot.vx
robot.v_y = _robot.vy
robot.v_theta = np.rad2deg(_robot.vorientation)
self.robots_blue[robot.id] = robot
for _robot in packet.frame.robots_yellow:
robot = Robot()
robot.id = _robot.robot_id
robot.x = _robot.x
robot.y = _robot.y
robot.theta = np.rad2deg(_robot.orientation)
robot.v_x = _robot.vx
robot.v_y = _robot.vy
robot.v_theta = np.rad2deg(_robot.vorientation)
self.robots_yellow[robot.id] = robot | /rsoccer_gym-1.4-py3-none-any.whl/rsoccer_gym/Entities/Frame.py | 0.888372 | 0.455683 | Frame.py | pypi |
import math
def closest_node(values, node1, node2):
    """Return (node, squared_distance) for whichever of *node1*/*node2* is
    closer to *values*; a missing (None) node loses with distance inf."""
    if node1 is None:
        dist2 = math.inf if node2 is None else node2.distance2_to(values)
        return node2, dist2
    if node2 is None:
        return node1, node1.distance2_to(values)
    node1_dist2 = node1.distance2_to(values)
    node2_dist2 = node2.distance2_to(values)
    if node1_dist2 < node2_dist2:
        return node1, node1_dist2
    else:
        return node2, node2_dist2


class KDTree:
    """Simple k-d tree over fixed-length numeric tuples, supporting insert
    and nearest-neighbour queries (used to keep spawn positions apart)."""

    class KDTreeNode:
        def __init__(self, values, left=None, right=None):
            self.values = values
            self.left = left
            self.right = right

        def insert(self, values, depth=0):
            """Insert *values* below this node, splitting on axis
            (depth % k)."""
            if self.values is None:
                self.values = values
                return
            axis = depth % len(values)
            if values[axis] < self.values[axis]:
                if self.left is None:
                    self.left = KDTree.KDTreeNode(values)
                else:
                    self.left.insert(values, depth + 1)
            else:
                if self.right is None:
                    self.right = KDTree.KDTreeNode(values)
                else:
                    self.right.insert(values, depth + 1)

        def distance2_to(self, values):
            """Squared Euclidean distance from this node to *values*."""
            return sum((v - s) ** 2 for v, s in zip(values, self.values))

        def get_nearest(self, values, depth=0):
            """Return (node, squared_distance) of the nearest stored point
            in this subtree, or (None, inf) for an empty root."""
            if self.values is None:
                return None, math.inf
            if self.left is None and self.right is None:
                return self, self.distance2_to(values)

            axis = depth % len(values)
            if values[axis] < self.values[axis]:
                next_branch, other_branch = self.left, self.right
            else:
                # BUG FIX: the original assigned self.left/self.right here
                # too (identical to the if-branch), so the search always
                # descended into the left subtree first; descend into the
                # side that actually contains the query point.
                next_branch, other_branch = self.right, self.left

            if next_branch is not None:
                candidate, _ = next_branch.get_nearest(values, depth + 1)
                closest, closest_dist2 = closest_node(values, candidate, self)
            else:
                closest, closest_dist2 = self, self.distance2_to(values)

            # Only search the far side when the splitting plane is at least
            # as close as the best match found so far.
            line_dist = values[axis] - self.values[axis]
            if other_branch is not None and closest_dist2 >= line_dist ** 2:
                candidate, _ = other_branch.get_nearest(values, depth + 1)
                closest, closest_dist2 = closest_node(values, candidate,
                                                      closest)

            return closest, closest_dist2

    def __init__(self):
        self.root = KDTree.KDTreeNode(None)

    def insert(self, values):
        """Insert a point (tuple of numbers) into the tree."""
        self.root.insert(values)

    def get_nearest(self, values):
        """Return (point, euclidean_distance) of the stored point nearest
        to *values*, or (None, inf) if the tree is empty (the original
        raised AttributeError in that case)."""
        node, dist2 = self.root.get_nearest(values)
        if node is None:
            return None, math.inf
        return node.values, math.sqrt(dist2)
import time
from typing import Dict, List, Optional
import gym
import numpy as np
from rsoccer_gym.Entities import Frame, Robot, Field
from rsoccer_gym.Simulators.rsim import RSimSSL
class SSLBaseEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
}
NORM_BOUNDS = 1.2
def __init__(self, field_type: int,
n_robots_blue: int, n_robots_yellow: int, time_step: float):
# Initialize Simulator
self.time_step = time_step
self.rsim = RSimSSL(field_type=field_type,
n_robots_blue=n_robots_blue,
n_robots_yellow=n_robots_yellow,
time_step_ms=int(self.time_step*1000))
self.n_robots_blue: int = n_robots_blue
self.n_robots_yellow: int = n_robots_yellow
# Get field dimensions
self.field_type: int = field_type
self.field: Field = self.rsim.get_field_params()
self.max_pos = max(self.field.width / 2, (self.field.length / 2)
+ self.field.penalty_length
)
max_wheel_rad_s = (self.field.rbt_motor_max_rpm / 60) * 2 * np.pi
self.max_v = max_wheel_rad_s * self.field.rbt_wheel_radius
# 0.04 = robot radius (0.09) + wheel thicknees (0.005)
self.max_w = np.rad2deg(self.max_v / 0.095)
# Initiate
self.frame: Frame = None
self.last_frame: Frame = None
self.view = None
self.steps = 0
self.sent_commands = None
def step(self, action):
self.steps += 1
# Join agent action with environment actions
commands: List[Robot] = self._get_commands(action)
# Send command to simulator
self.rsim.send_commands(commands)
self.sent_commands = commands
# Get Frame from simulator
self.last_frame = self.frame
self.frame = self.rsim.get_frame()
# Calculate environment observation, reward and done condition
observation = self._frame_to_observations()
reward, done = self._calculate_reward_and_done()
return observation, reward, done, {}
def reset(self):
self.steps = 0
self.last_frame = None
self.sent_commands = None
# Close render window
del(self.view)
self.view = None
initial_pos_frame: Frame = self._get_initial_positions_frame()
self.rsim.reset(initial_pos_frame)
# Get frame from simulator
self.frame = self.rsim.get_frame()
return self._frame_to_observations()
def render(self, mode: Optional = 'human') -> None:
'''
Renders the game depending on
ball's and players' positions.
Parameters
----------
None
Returns
-------
None
'''
if self.view == None:
from rsoccer_gym.Render import RCGymRender
self.view = RCGymRender(self.n_robots_blue,
self.n_robots_yellow,
self.field,
simulator='ssl')
return self.view.render_frame(self.frame, return_rgb_array=mode == "rgb_array")
def close(self):
self.rsim.stop()
def _get_commands(self, action):
'''returns a list of commands of type List[Robot] from type action_space action'''
raise NotImplementedError
def _frame_to_observations(self):
'''returns a type observation_space observation from a type List[Robot] state'''
raise NotImplementedError
def _calculate_reward_and_done(self):
'''returns reward value and done flag from type List[Robot] state'''
raise NotImplementedError
def _get_initial_positions_frame(self) -> Frame:
'''returns frame with robots initial positions'''
raise NotImplementedError
def norm_pos(self, pos):
return np.clip(
pos / self.max_pos,
-self.NORM_BOUNDS,
self.NORM_BOUNDS
)
def norm_v(self, v):
return np.clip(
v / self.max_v,
-self.NORM_BOUNDS,
self.NORM_BOUNDS
)
def norm_w(self, w):
return np.clip(
w / self.max_w,
-self.NORM_BOUNDS,
self.NORM_BOUNDS
) | /rsoccer_gym-1.4-py3-none-any.whl/rsoccer_gym/ssl/ssl_gym_base.py | 0.901532 | 0.358129 | ssl_gym_base.py | pypi |
import math
import random
from rsoccer_gym.Utils.Utils import OrnsteinUhlenbeckAction
from typing import Dict
import gym
import numpy as np
from rsoccer_gym.Entities import Frame, Robot, Ball
from rsoccer_gym.ssl.ssl_gym_base import SSLBaseEnv
from rsoccer_gym.Utils import KDTree
class SSLGoToBallIREnv(SSLBaseEnv):
"""The SSL robot needs to reach the ball
Description:
One blue robot and a ball are randomly placed on a div B field,
the episode ends when the robots infrared is activated, the ir
is activated when the ball touches the robot kicker
Observation:
Type: Box(4 + 7*n_robots_blue + 5*n_robots_yellow)
Normalized Bounds to [-1.2, 1.2]
Num Observation normalized
0->3 Ball [X, Y, V_x, V_y]
4->10 id 0 Blue [X, Y, sin(theta), cos(theta), v_x, v_y, v_theta]
+5*i id i Yellow Robot [X, Y, v_x, v_y, v_theta]
Actions:
Type: Box(3, )
Num Action
0 id 0 Blue Global X Direction Speed (%)
1 id 0 Blue Global Y Direction Speed (%)
2 id 0 Blue Angular Speed (%)
Reward:
1 if ball is reached
Starting State:
Randomized Robots and Ball initial Position
Episode Termination:
Ball is reached or 30 seconds (1200 steps)
"""
def __init__(self, field_type=1, n_robots_yellow=0):
super().__init__(field_type=field_type, n_robots_blue=1,
n_robots_yellow=n_robots_yellow, time_step=0.025)
self.action_space = gym.spaces.Box(low=-1, high=1,
shape=(3, ), dtype=np.float32)
n_obs = 4 + 7*self.n_robots_blue + 2*self.n_robots_yellow
self.observation_space = gym.spaces.Box(low=-self.NORM_BOUNDS,
high=self.NORM_BOUNDS,
shape=(n_obs, ),
dtype=np.float32)
# scale max dist rw to 1 Considering that max possible move rw if ball and robot are in opposite corners of field
self.ball_dist_scale = np.linalg.norm([self.field.width, self.field.length])
# scale max energy rw to 1 Considering that max possible energy if max robot wheel speed sent every step
wheel_max_rad_s = 160
max_steps = 1200
self.energy_scale = ((wheel_max_rad_s * 4) * max_steps)
# Limit robot speeds
self.max_v = 2.5
self.max_w = 10
print('Environment initialized')
def reset(self):
self.reward_shaping_total = None
return super().reset()
def step(self, action):
observation, reward, done, _ = super().step(action)
return observation, reward, done, self.reward_shaping_total
def _frame_to_observations(self):
observation = []
observation.append(self.norm_pos(self.frame.ball.x))
observation.append(self.norm_pos(self.frame.ball.y))
observation.append(self.norm_v(self.frame.ball.v_x))
observation.append(self.norm_v(self.frame.ball.v_y))
for i in range(self.n_robots_blue):
observation.append(self.norm_pos(self.frame.robots_blue[i].x))
observation.append(self.norm_pos(self.frame.robots_blue[i].y))
observation.append(
np.sin(np.deg2rad(self.frame.robots_blue[i].theta))
)
observation.append(
np.cos(np.deg2rad(self.frame.robots_blue[i].theta))
)
observation.append(self.norm_v(self.frame.robots_blue[i].v_x))
observation.append(self.norm_v(self.frame.robots_blue[i].v_y))
observation.append(self.norm_w(self.frame.robots_blue[i].v_theta))
for i in range(self.n_robots_yellow):
observation.append(self.norm_pos(self.frame.robots_yellow[i].x))
observation.append(self.norm_pos(self.frame.robots_yellow[i].y))
return np.array(observation, dtype=np.float32)
def _get_commands(self, actions):
commands = []
angle = self.frame.robots_blue[0].theta
v_x, v_y, v_theta = self.convert_actions(actions, np.deg2rad(angle))
cmd = Robot(yellow=False, id=0, v_x=v_x, v_y=v_y, v_theta=v_theta)
commands.append(cmd)
return commands
def convert_actions(self, action, angle):
"""Denormalize, clip to absolute max and convert to local"""
# Denormalize
v_x = action[0] * self.max_v
v_y = action[1] * self.max_v
v_theta = action[2] * self.max_w
# Convert to local
v_x, v_y = v_x*np.cos(angle) + v_y*np.sin(angle),\
-v_x*np.sin(angle) + v_y*np.cos(angle)
# clip by max absolute
v_norm = np.linalg.norm([v_x,v_y])
c = v_norm < self.max_v or self.max_v / v_norm
v_x, v_y = v_x*c, v_y*c
return v_x, v_y, v_theta
def _calculate_reward_and_done(self):
if self.reward_shaping_total is None:
self.reward_shaping_total = {
'goal': 0,
'ball_dist': 0,
'energy': 0
}
reward = 0
done = False
ball = self.frame.ball
robot = self.frame.robots_blue[0]
# Check if robot infrared is activated
if not done:
if robot.infrared:
reward = 1
done = True
self.reward_shaping_total['goal'] += 1
elif self.last_frame is not None:
ball_dist_rw = self.__ball_dist_rw() / self.ball_dist_scale
self.reward_shaping_total['ball_dist'] += ball_dist_rw
energy_rw = -self.__energy_pen() / self.energy_scale
self.reward_shaping_total['energy'] += energy_rw
reward = reward\
+ ball_dist_rw\
+ energy_rw
return reward, done
def _get_initial_positions_frame(self):
'''Returns the position of each robot and ball for the initial frame'''
field_half_length = self.field.length / 2
field_half_width = self.field.width / 2
def x(): return random.uniform(-field_half_length + 0.1,
field_half_length - 0.1)
def y(): return random.uniform(-field_half_width + 0.1,
field_half_width - 0.1)
def theta(): return random.uniform(0, 360)
pos_frame: Frame = Frame()
pos_frame.ball = Ball(x=x(), y=y())
min_dist = 0.2
places = KDTree()
places.insert((pos_frame.ball.x, pos_frame.ball.y))
for i in range(self.n_robots_blue):
pos = (x(), y())
while places.get_nearest(pos)[1] < min_dist:
pos = (x(), y())
places.insert(pos)
pos_frame.robots_blue[i] = Robot(x=pos[0], y=pos[1], theta=theta())
for i in range(self.n_robots_yellow):
pos = (x(), y())
while places.get_nearest(pos)[1] < min_dist:
pos = (x(), y())
places.insert(pos)
pos_frame.robots_yellow[i] = Robot(x=pos[0], y=pos[1], theta=theta())
return pos_frame
def __ball_dist_rw(self):
assert(self.last_frame is not None)
# Calculate previous ball dist
last_ball = self.last_frame.ball
last_robot = self.last_frame.robots_blue[0]
last_ball_pos = np.array([last_ball.x, last_ball.y])
last_robot_pos = np.array([last_robot.x, last_robot.y])
last_ball_dist = np.linalg.norm(last_robot_pos - last_ball_pos)
# Calculate new ball dist
ball = self.frame.ball
robot = self.frame.robots_blue[0]
ball_pos = np.array([ball.x, ball.y])
robot_pos = np.array([robot.x, robot.y])
ball_dist = np.linalg.norm(robot_pos - ball_pos)
ball_dist_rw = last_ball_dist - ball_dist
return ball_dist_rw
def __energy_pen(self):
robot = self.frame.robots_blue[0]
# Sum of abs each wheel speed sent
energy = abs(robot.v_wheel0)\
+ abs(robot.v_wheel1)\
+ abs(robot.v_wheel2)\
+ abs(robot.v_wheel3)
return energy | /rsoccer_gym-1.4-py3-none-any.whl/rsoccer_gym/ssl/ssl_go_to_ball/ssl_gym_go_to_ball_ir.py | 0.739422 | 0.380845 | ssl_gym_go_to_ball_ir.py | pypi |
import math
import random
from rsoccer_gym.Utils.Utils import OrnsteinUhlenbeckAction
from typing import Dict
import gym
import numpy as np
from rsoccer_gym.Entities import Frame, Robot, Ball
from rsoccer_gym.ssl.ssl_gym_base import SSLBaseEnv
from rsoccer_gym.Utils import KDTree
class SSLGoToBallEnv(SSLBaseEnv):
"""The SSL robot needs to reach the ball
Description:
One blue robot and a ball are randomly placed on a div B field,
the episode ends when the robots is closer than 0.2m from the ball
Observation:
Type: Box(4 + 7*n_robots_blue + 5*n_robots_yellow)
Normalized Bounds to [-1.2, 1.2]
Num Observation normalized
0->3 Ball [X, Y, V_x, V_y]
4->10 id 0 Blue [X, Y, sin(theta), cos(theta), v_x, v_y, v_theta]
+5*i id i Yellow Robot [X, Y, v_x, v_y, v_theta]
Actions:
Type: Box(3, )
Num Action
0 id 0 Blue Global X Direction Speed (%)
1 id 0 Blue Global Y Direction Speed (%)
2 id 0 Blue Angular Speed (%)
Reward:
1 if ball is reached
Starting State:
Randomized Robots and Ball initial Position
Episode Termination:
Ball is reached or 30 seconds (1200 steps)
"""
def __init__(self, field_type=1, n_robots_yellow=0):
super().__init__(field_type=field_type, n_robots_blue=1,
n_robots_yellow=n_robots_yellow, time_step=0.025)
self.action_space = gym.spaces.Box(low=-1, high=1,
shape=(3, ), dtype=np.float32)
n_obs = 4 + 7*self.n_robots_blue + 2*self.n_robots_yellow
self.observation_space = gym.spaces.Box(low=-self.NORM_BOUNDS,
high=self.NORM_BOUNDS,
shape=(n_obs, ),
dtype=np.float32)
# Limit robot speeds
self.max_v = 2.5
self.max_w = 10
print('Environment initialized')
def _frame_to_observations(self):
observation = []
observation.append(self.norm_pos(self.frame.ball.x))
observation.append(self.norm_pos(self.frame.ball.y))
observation.append(self.norm_v(self.frame.ball.v_x))
observation.append(self.norm_v(self.frame.ball.v_y))
for i in range(self.n_robots_blue):
observation.append(self.norm_pos(self.frame.robots_blue[i].x))
observation.append(self.norm_pos(self.frame.robots_blue[i].y))
observation.append(
np.sin(np.deg2rad(self.frame.robots_blue[i].theta))
)
observation.append(
np.cos(np.deg2rad(self.frame.robots_blue[i].theta))
)
observation.append(self.norm_v(self.frame.robots_blue[i].v_x))
observation.append(self.norm_v(self.frame.robots_blue[i].v_y))
observation.append(self.norm_w(self.frame.robots_blue[i].v_theta))
for i in range(self.n_robots_yellow):
observation.append(self.norm_pos(self.frame.robots_yellow[i].x))
observation.append(self.norm_pos(self.frame.robots_yellow[i].y))
return np.array(observation, dtype=np.float32)
def _get_commands(self, actions):
commands = []
angle = self.frame.robots_blue[0].theta
v_x, v_y, v_theta = self.convert_actions(actions, np.deg2rad(angle))
cmd = Robot(yellow=False, id=0, v_x=v_x, v_y=v_y, v_theta=v_theta)
commands.append(cmd)
return commands
def convert_actions(self, action, angle):
"""Denormalize, clip to absolute max and convert to local"""
# Denormalize
v_x = action[0] * self.max_v
v_y = action[1] * self.max_v
v_theta = action[2] * self.max_w
# Convert to local
v_x, v_y = v_x*np.cos(angle) + v_y*np.sin(angle),\
-v_x*np.sin(angle) + v_y*np.cos(angle)
# clip by max absolute
v_norm = np.linalg.norm([v_x,v_y])
c = v_norm < self.max_v or self.max_v / v_norm
v_x, v_y = v_x*c, v_y*c
return v_x, v_y, v_theta
def _calculate_reward_and_done(self):
reward = 0
ball = self.frame.ball
robot = self.frame.robots_blue[0]
dist_robot_ball = np.linalg.norm(
np.array([ball.x, ball.y])
- np.array([robot.x, robot.y])
)
# Check if robot is less than 0.2m from ball
if dist_robot_ball < 0.2:
reward = 1
done = reward
return reward, done
    def _get_initial_positions_frame(self):
        '''Returns the position of each robot and ball for the initial frame'''
        field_half_length = self.field.length / 2
        field_half_width = self.field.width / 2
        # Uniform position samplers kept 0.1 m inside the field borders
        def x(): return random.uniform(-field_half_length + 0.1,
                                       field_half_length - 0.1)
        def y(): return random.uniform(-field_half_width + 0.1,
                                       field_half_width - 0.1)
        def theta(): return random.uniform(0, 360)
        pos_frame: Frame = Frame()
        pos_frame.ball = Ball(x=x(), y=y())
        # Minimum spacing (m) enforced between every pair of placed objects
        min_dist = 0.2
        # KDTree of already-placed positions for nearest-neighbour queries
        places = KDTree()
        places.insert((pos_frame.ball.x, pos_frame.ball.y))
        for i in range(self.n_robots_blue):
            pos = (x(), y())
            # Resample until the candidate clears min_dist from everything
            # placed so far (get_nearest returns (point, distance))
            while places.get_nearest(pos)[1] < min_dist:
                pos = (x(), y())
            places.insert(pos)
            pos_frame.robots_blue[i] = Robot(x=pos[0], y=pos[1], theta=theta())
        for i in range(self.n_robots_yellow):
            pos = (x(), y())
            while places.get_nearest(pos)[1] < min_dist:
                pos = (x(), y())
            places.insert(pos)
            pos_frame.robots_yellow[i] = Robot(x=pos[0], y=pos[1], theta=theta())
        return pos_frame
import math
import random
from typing import Dict
import gym
import numpy as np
from rsoccer_gym.Entities import Frame, Robot, Ball
from rsoccer_gym.ssl.ssl_gym_base import SSLBaseEnv
from rsoccer_gym.Utils import KDTree
class SSLHWStaticDefendersEnv(SSLBaseEnv):
    """The SSL robot needs to make a goal on a field with static defenders

    Description:
        The controlled robot is started on the field center and needs to
        score on the positive side field, where there are 6 static defenders
        while obeying div B rules
    Observation:
        Type: Box(4 + 8*n_robots_blue + 2*n_robots_yellow)
        Normalized Bounds to [-1.2, 1.2]
        Num             Observation normalized
        0->3            Ball [X, Y, V_x, V_y]
        4->11           id 0 Blue [X, Y, sin(theta), cos(theta), v_x, v_y, v_theta, infrared]
        +2*i            id i Yellow Robot [X, Y]
    Actions:
        Type: Box(5, )
        Num     Action
        0       id 0 Blue Global X Direction Speed (%)
        1       id 0 Blue Global Y Direction Speed (%)
        2       id 0 Blue Angular Speed (%)
        3       id 0 Blue Kick x Speed (%)
        4       id 0 Blue Dribbler (%) (true if % is positive)
    Reward:
        5 if goal, plus per-step shaping terms (ball distance, ball
        gradient towards goal, energy penalty)
    Starting State:
        Robot on field center, ball and defenders randomly positioned on
        positive field side
    Episode Termination:
        Goal, 25 seconds (1000 steps), or rule infraction
    """

    def __init__(self, field_type=2):
        super().__init__(field_type=field_type, n_robots_blue=1,
                         n_robots_yellow=6, time_step=0.025)
        # Actions: [v_x %, v_y %, v_theta %, kick %, dribbler %]
        self.action_space = gym.spaces.Box(low=-1, high=1,
                                           shape=(5, ), dtype=np.float32)
        # Ball (4) + blue robot (8 each, incl. infrared flag) + yellows (2 each)
        n_obs = 4 + 8*self.n_robots_blue + 2*self.n_robots_yellow
        self.observation_space = gym.spaces.Box(low=-self.NORM_BOUNDS,
                                                high=self.NORM_BOUNDS,
                                                shape=(n_obs, ),
                                                dtype=np.float32)
        # Scale max dist reward to 1, considering the max possible move if
        # ball and robot are in opposite corners of the field
        self.ball_dist_scale = np.linalg.norm([self.field.width, self.field.length/2])
        self.ball_grad_scale = np.linalg.norm([self.field.width/2, self.field.length/2])/4
        # Scale max energy reward to 1, considering max robot wheel speed
        # sent on every step of an episode
        wheel_max_rad_s = 160
        max_steps = 1000
        self.energy_scale = ((wheel_max_rad_s * 4) * max_steps)
        # Limit robot speeds
        self.max_v = 2.5
        self.max_w = 10
        self.kick_speed_x = 5.0
        print('Environment initialized')

    def reset(self):
        """Reset the episode and clear the reward-shaping accumulators."""
        self.reward_shaping_total = None
        return super().reset()

    def step(self, action):
        """Step the base env, exposing the shaping breakdown as info."""
        observation, reward, done, _ = super().step(action)
        return observation, reward, done, self.reward_shaping_total

    def _frame_to_observations(self):
        """Build the normalized observation vector for the current frame."""
        observation = []
        observation.append(self.norm_pos(self.frame.ball.x))
        observation.append(self.norm_pos(self.frame.ball.y))
        observation.append(self.norm_v(self.frame.ball.v_x))
        observation.append(self.norm_v(self.frame.ball.v_y))
        for i in range(self.n_robots_blue):
            observation.append(self.norm_pos(self.frame.robots_blue[i].x))
            observation.append(self.norm_pos(self.frame.robots_blue[i].y))
            observation.append(
                np.sin(np.deg2rad(self.frame.robots_blue[i].theta))
            )
            observation.append(
                np.cos(np.deg2rad(self.frame.robots_blue[i].theta))
            )
            observation.append(self.norm_v(self.frame.robots_blue[i].v_x))
            observation.append(self.norm_v(self.frame.robots_blue[i].v_y))
            observation.append(self.norm_w(self.frame.robots_blue[i].v_theta))
            # Infrared flag: 1 when the ball is touching the dribbler
            observation.append(1 if self.frame.robots_blue[i].infrared else 0)
        for i in range(self.n_robots_yellow):
            observation.append(self.norm_pos(self.frame.robots_yellow[i].x))
            observation.append(self.norm_pos(self.frame.robots_yellow[i].y))
        return np.array(observation, dtype=np.float32)

    def _get_commands(self, actions):
        """Translate the action array into a single blue robot command."""
        commands = []
        angle = self.frame.robots_blue[0].theta
        v_x, v_y, v_theta = self.convert_actions(actions, np.deg2rad(angle))
        cmd = Robot(yellow=False, id=0, v_x=v_x, v_y=v_y, v_theta=v_theta,
                    kick_v_x=self.kick_speed_x if actions[3] > 0 else 0.,
                    dribbler=True if actions[4] > 0 else False)
        commands.append(cmd)
        return commands

    def convert_actions(self, action, angle):
        """Denormalize, convert to the robot local frame and clip speed."""
        # Denormalize
        v_x = action[0] * self.max_v
        v_y = action[1] * self.max_v
        v_theta = action[2] * self.max_w
        # Convert to local
        v_x, v_y = v_x*np.cos(angle) + v_y*np.sin(angle),\
            -v_x*np.sin(angle) + v_y*np.cos(angle)
        # Clip the linear speed norm to max_v (explicit conditional in
        # place of the previous bool/float `or` coercion trick)
        v_norm = np.linalg.norm([v_x, v_y])
        scale = 1.0 if v_norm < self.max_v else self.max_v / v_norm
        return v_x * scale, v_y * scale, v_theta

    def _calculate_reward_and_done(self):
        """Compute the step reward, termination flag and shaping stats."""
        if self.reward_shaping_total is None:
            self.reward_shaping_total = {
                'goal': 0,
                'rbt_in_gk_area': 0,
                'done_ball_out': 0,
                'done_ball_out_right': 0,
                'done_rbt_out': 0,
                'ball_dist': 0,
                'ball_grad': 0,
                'energy': 0
            }
        reward = 0
        done = False
        # Field parameters
        half_len = self.field.length / 2
        half_wid = self.field.width / 2
        pen_len = self.field.penalty_length
        half_pen_wid = self.field.penalty_width / 2
        half_goal_wid = self.field.goal_width / 2
        ball = self.frame.ball
        robot = self.frame.robots_blue[0]

        def robot_in_gk_area(rbt):
            return rbt.x > half_len - pen_len and abs(rbt.y) < half_pen_wid
        # Check if robot exited field right side limits
        if robot.x < -0.2 or abs(robot.y) > half_wid:
            done = True
            self.reward_shaping_total['done_rbt_out'] += 1
        # End episode if robot enters the goalkeeper area
        elif robot_in_gk_area(robot):
            done = True
            self.reward_shaping_total['rbt_in_gk_area'] += 1
        # Check ball for ending conditions
        elif ball.x < 0 or abs(ball.y) > half_wid:
            done = True
            self.reward_shaping_total['done_ball_out'] += 1
        elif ball.x > half_len:
            done = True
            if abs(ball.y) < half_goal_wid:
                reward = 5
                self.reward_shaping_total['goal'] += 1
            else:
                reward = 0
                self.reward_shaping_total['done_ball_out_right'] += 1
        elif self.last_frame is not None:
            # Per-step shaping, each term normalized to roughly [-1, 1]
            ball_dist_rw = self.__ball_dist_rw() / self.ball_dist_scale
            self.reward_shaping_total['ball_dist'] += ball_dist_rw
            ball_grad_rw = self.__ball_grad_rw() / self.ball_grad_scale
            self.reward_shaping_total['ball_grad'] += ball_grad_rw
            energy_rw = -self.__energy_pen() / self.energy_scale
            self.reward_shaping_total['energy'] += energy_rw
            reward = reward\
                + ball_dist_rw\
                + ball_grad_rw\
                + energy_rw
        return reward, done

    def _get_initial_positions_frame(self):
        '''Returns the position of each robot and ball for the initial frame'''
        half_len = self.field.length / 2
        half_wid = self.field.width / 2
        pen_len = self.field.penalty_length
        half_pen_wid = self.field.penalty_width / 2
        # Samplers restricted to the positive (attacking) side of the field
        def x(): return random.uniform(0.2, half_len - 0.1)
        def y(): return random.uniform(-half_wid + 0.1, half_wid - 0.1)
        def theta(): return random.uniform(0, 360)
        pos_frame: Frame = Frame()
        pos_frame.robots_blue[0] = Robot(x=0., y=0., theta=0.)

        def in_gk_area(obj):
            return obj.x > half_len - pen_len and abs(obj.y) < half_pen_wid
        # Resample the ball until it is outside the goalkeeper area
        pos_frame.ball = Ball(x=x(), y=y())
        while in_gk_area(pos_frame.ball):
            pos_frame.ball = Ball(x=x(), y=y())
        # Minimum spacing (m) enforced between all placed objects
        min_dist = 0.2
        places = KDTree()
        places.insert((pos_frame.ball.x, pos_frame.ball.y))
        places.insert((pos_frame.robots_blue[0].x, pos_frame.robots_blue[0].y))
        for i in range(self.n_robots_yellow):
            pos = (x(), y())
            while places.get_nearest(pos)[1] < min_dist:
                pos = (x(), y())
            places.insert(pos)
            pos_frame.robots_yellow[i] = Robot(x=pos[0], y=pos[1], theta=theta())
        return pos_frame

    def __ball_dist_rw(self):
        """Robot-approached-ball shaping term, clipped to [-1, 1]."""
        assert(self.last_frame is not None)
        # Calculate previous ball dist
        last_ball = self.last_frame.ball
        last_robot = self.last_frame.robots_blue[0]
        last_ball_pos = np.array([last_ball.x, last_ball.y])
        last_robot_pos = np.array([last_robot.x, last_robot.y])
        last_ball_dist = np.linalg.norm(last_robot_pos - last_ball_pos)
        # Calculate new ball dist
        ball = self.frame.ball
        robot = self.frame.robots_blue[0]
        ball_pos = np.array([ball.x, ball.y])
        robot_pos = np.array([robot.x, robot.y])
        ball_dist = np.linalg.norm(robot_pos - ball_pos)
        ball_dist_rw = last_ball_dist - ball_dist
        # Debug trace for physically implausible jumps
        if ball_dist_rw > 1:
            print("ball_dist -> ", ball_dist_rw)
            print(self.frame.ball)
            print(self.frame.robots_blue)
            print(self.frame.robots_yellow)
            print("===============================")
        return np.clip(ball_dist_rw, -1, 1)

    def __ball_grad_rw(self):
        """Ball-moved-towards-goal shaping term, clipped to [-1, 1]."""
        assert(self.last_frame is not None)
        # Goal pos
        goal = np.array([self.field.length/2, 0.])
        # Calculate previous ball dist
        last_ball = self.last_frame.ball
        ball = self.frame.ball
        last_ball_pos = np.array([last_ball.x, last_ball.y])
        last_ball_dist = np.linalg.norm(goal - last_ball_pos)
        # Calculate new ball dist
        ball_pos = np.array([ball.x, ball.y])
        ball_dist = np.linalg.norm(goal - ball_pos)
        ball_dist_rw = last_ball_dist - ball_dist
        # Debug trace for physically implausible jumps
        if ball_dist_rw > 1:
            print("ball_dist -> ", ball_dist_rw)
            print(self.frame.ball)
            print(self.frame.robots_blue)
            print(self.frame.robots_yellow)
            print("===============================")
        return np.clip(ball_dist_rw, -1, 1)

    def __energy_pen(self):
        """Sum of absolute wheel speeds sent on the last command."""
        robot = self.frame.robots_blue[0]
        # Sum of abs each wheel speed sent
        energy = abs(robot.v_wheel0)\
            + abs(robot.v_wheel1)\
            + abs(robot.v_wheel2)\
            + abs(robot.v_wheel3)
        return energy
import math
import random
from typing import Dict
import gym
import numpy as np
from rsoccer_gym.Entities import Frame, Robot, Ball
from rsoccer_gym.ssl.ssl_gym_base import SSLBaseEnv
class SSLHWDribblingEnv(SSLBaseEnv):
    """The SSL robot needs navigate a course while keeping the ball

    Description:
        The robot must navigate through a field with robots as obstacles,
        while keeping the ball. The obstacles are placed on a straight line,
        the course will be in a zigzag configuration, having checkpoints
        defined as the space between two robots, the last checkpoint needs
        to be passed three times
    Observation:
        Type: Box(5 + 8*n_robots_blue + 2*n_robots_yellow)
        Normalized Bounds to [-1.2, 1.2]
        Num             Observation normalized
        0               Checkpoint progress, count mapped to [-1, 1]
        1->4            Ball [X, Y, V_x, V_y]
        5->12           id 0 Blue [X, Y, sin(theta), cos(theta), v_x, v_y, v_theta, infrared]
        +2*i            id i Yellow Robot [X, Y]
    Actions:
        Type: Box(4,)
        Num     Action
        0       id 0 Blue Global X Direction Speed (%)
        1       id 0 Blue Global Y Direction Speed (%)
        2       id 0 Blue Angular Speed (%)
        3       id 0 Blue Dribbler (%) (true if % is positive)
    Reward:
        1 every time the robot passes a checkpoint
    Starting State:
        Robot starts with the ball and obstacles are spaced with
        pre defined values.
    Episode Termination:
        Course completed, 2 minutes (4800 steps), robot exits course limits or robot
        reverse a checkpoint
    """

    def __init__(self):
        super().__init__(field_type=2, n_robots_blue=1,
                         n_robots_yellow=4, time_step=0.025)
        # Actions: [v_x %, v_y %, v_theta %, dribbler %]
        self.action_space = gym.spaces.Box(low=-1, high=1,
                                           shape=(4, ), dtype=np.float32)
        # Checkpoint scalar (1) + ball (4) + blue (8 each) + yellows (2 each)
        n_obs = 5 + 8*self.n_robots_blue + 2*self.n_robots_yellow
        self.observation_space = gym.spaces.Box(low=-self.NORM_BOUNDS,
                                                high=self.NORM_BOUNDS,
                                                shape=(n_obs, ),
                                                dtype=np.float32)
        self.checkpoints_count = 0
        # Checkpoints nodes positions (x of each obstacle robot)
        self.node_0 = -0.5
        self.node_1 = -1.
        self.node_2 = -1.5
        self.node_3 = -2.
        # Allowed margin (m) around the course before termination
        self.field_margin = 1
        # Limit robot speeds
        self.max_v = 2.5
        self.max_w = 10
        print('Environment initialized')

    def reset(self):
        """Reset the episode and the checkpoint progress counter."""
        self.checkpoints_count = 0
        return super().reset()

    def _frame_to_observations(self):
        """Build the normalized observation vector for the current frame."""
        observation = []
        # Checkpoint count (0..6) linearly mapped to [-1, 1]
        observation.append(((self.checkpoints_count/6)*2)-1)
        observation.append(self.norm_pos(self.frame.ball.x))
        observation.append(self.norm_pos(self.frame.ball.y))
        observation.append(self.norm_v(self.frame.ball.v_x))
        observation.append(self.norm_v(self.frame.ball.v_y))
        for i in range(self.n_robots_blue):
            observation.append(self.norm_pos(self.frame.robots_blue[i].x))
            observation.append(self.norm_pos(self.frame.robots_blue[i].y))
            observation.append(
                np.sin(np.deg2rad(self.frame.robots_blue[i].theta))
            )
            observation.append(
                np.cos(np.deg2rad(self.frame.robots_blue[i].theta))
            )
            observation.append(self.norm_v(self.frame.robots_blue[i].v_x))
            observation.append(self.norm_v(self.frame.robots_blue[i].v_y))
            observation.append(self.norm_w(self.frame.robots_blue[i].v_theta))
            # Infrared flag encoded as +-1 in this env
            observation.append(1 if self.frame.robots_blue[i].infrared else -1)
        for i in range(self.n_robots_yellow):
            observation.append(self.norm_pos(self.frame.robots_yellow[i].x))
            observation.append(self.norm_pos(self.frame.robots_yellow[i].y))
        return np.array(observation, dtype=np.float32)

    def _get_commands(self, actions):
        """Translate the action array into a single blue robot command."""
        commands = []
        angle = self.frame.robots_blue[0].theta
        v_x, v_y, v_theta = self.convert_actions(actions, np.deg2rad(angle))
        cmd = Robot(yellow=False, id=0, v_x=v_x, v_y=v_y, v_theta=v_theta,
                    dribbler=True if actions[3] > 0 else False)
        commands.append(cmd)
        return commands

    def convert_actions(self, action, angle):
        """Denormalize, convert to the robot local frame and clip speed."""
        # Denormalize
        v_x = action[0] * self.max_v
        v_y = action[1] * self.max_v
        v_theta = action[2] * self.max_w
        # Convert to local
        v_x, v_y = v_x*np.cos(angle) + v_y*np.sin(angle),\
            -v_x*np.sin(angle) + v_y*np.cos(angle)
        # Clip the linear speed norm to max_v (explicit conditional in
        # place of the previous bool/float `or` coercion trick)
        v_norm = np.linalg.norm([v_x, v_y])
        scale = 1.0 if v_norm < self.max_v else self.max_v / v_norm
        return v_x * scale, v_y * scale, v_theta

    def _calculate_reward_and_done(self):
        """Reward checkpoint crossings; terminate on infractions."""
        reward = 0
        done = False
        ball = self.frame.ball
        # Guard the very first step, when no previous frame exists yet
        # (the original `None or self.last_frame.ball` raised
        # AttributeError whenever last_frame was None)
        last_ball = self.last_frame.ball if self.last_frame is not None else None
        robot = self.frame.robots_blue[0]
        # End episode in case of collision (obstacles should stay static)
        for rbt in self.frame.robots_yellow.values():
            if abs(rbt.v_x) > 0.05 or abs(rbt.v_y) > 0.05:
                done = True

        def robot_out_of_bounds(rbt):
            if rbt.x < self.node_3 - self.field_margin or rbt.x > self.field_margin:
                return True
            if abs(rbt.y) > self.field_margin:
                return True
            return False
        if robot_out_of_bounds(robot):
            done = True
        elif last_ball is not None:
            # Checkpoints alternate sides (zigzag); each is the gap between
            # two obstacle nodes, detected as a y sign change of the ball
            if self.checkpoints_count == 0:
                if ball.x < self.node_0 and ball.x > self.node_1:
                    if last_ball.y >= 0 and ball.y < 0:
                        reward = 1
                        self.checkpoints_count += 1
            elif self.checkpoints_count == 1:
                if ball.x < self.node_1 and ball.x > self.node_2:
                    if last_ball.y < 0 and ball.y >= 0:
                        reward = 1
                        self.checkpoints_count += 1
            elif self.checkpoints_count >= 2:
                # The last checkpoint must be passed repeatedly (7 total)
                if self.checkpoints_count % 2 == 0:
                    if ball.x < self.node_2 and ball.x > self.node_3:
                        if last_ball.y >= 0 and ball.y < 0:
                            reward = 1
                            self.checkpoints_count += 1
                            if self.checkpoints_count == 7:
                                done = True
                        # Reversing the checkpoint ends the episode
                        elif last_ball.y < 0 and ball.y >= 0:
                            done = True
                else:
                    if ball.x > self.node_3 - self.field_margin and ball.x < self.node_3:
                        if last_ball.y < 0 and ball.y >= 0:
                            reward = 1
                            self.checkpoints_count += 1
        return reward, done

    def _get_initial_positions_frame(self):
        '''Returns the position of each robot and ball for the initial frame'''
        # TODO
        pos_frame: Frame = Frame()
        pos_frame.ball = Ball(x=-0.1, y=0.)
        pos_frame.robots_blue[0] = Robot(x=0., y=0., theta=180.)
        # Obstacles on a straight line along the x axis
        pos_frame.robots_yellow[0] = Robot(x=self.node_0, y=0., theta=180.)
        pos_frame.robots_yellow[1] = Robot(x=self.node_1, y=0., theta=180.)
        pos_frame.robots_yellow[2] = Robot(x=self.node_2, y=0., theta=180.)
        pos_frame.robots_yellow[3] = Robot(x=self.node_3, y=0., theta=180.)
        return pos_frame
import math
import random
from typing import Dict
import gym
import numpy as np
from rsoccer_gym.Entities import Ball, Frame, Robot
from rsoccer_gym.ssl.ssl_gym_base import SSLBaseEnv
class SSLPassEnduranceMAEnv(SSLBaseEnv):
    """Two SSL robots must keep exchanging passes (multi-agent version)

    Description:
        One robot is the shooter, the other the receiver; when the
        receiver's infrared triggers, both agents are rewarded and the
        roles are swapped.
    Observation:
        Type: Box(n_robots_blue, 4 + 9*n_robots_blue)
        Normalized Bounds to [-1.2, 1.2]
        Each row is one agent's view, with its own robot listed first:
        0->3            Ball [X, Y, V_x, V_y]
        +9*i            robot [X, Y, sin(theta), cos(theta), v_x, v_y,
                        v_theta, infrared, is_shooter]
    Actions:
        Type: Box(n_robots_blue, 5)
        Num     Action (per robot)
        0       Global X Direction Speed (%)
        1       Global Y Direction Speed (%)
        2       Angular Speed (%)
        3       Kick x Speed (%), with a +-0.5 deadzone
        4       Dribbler (%) (true if % is positive)
    Reward:
        10 for each robot on a completed pass, plus ball-gradient shaping
        and an energy penalty; -1 for both robots on a bad state
    Starting State:
        Shooter behind the ball, receiver on the mirrored y position
    Episode Termination:
        30 seconds (1200 steps) or wrong pass / ball stopped too long
    """

    def __init__(self):
        super().__init__(field_type=2, n_robots_blue=2,
                         n_robots_yellow=0, time_step=0.025)
        self.action_space = gym.spaces.Box(low=-1, high=1,
                                           shape=(self.n_robots_blue, 5),
                                           dtype=np.float32)
        # Ball (4) + each blue robot (9, incl. infrared and shooter flags)
        n_obs = 4 + 9*self.n_robots_blue
        self.stopped_steps = 0
        self.observation_space = gym.spaces.Box(low=-self.NORM_BOUNDS,
                                                high=self.NORM_BOUNDS,
                                                shape=(
                                                    self.n_robots_blue, n_obs),
                                                dtype=np.float32)
        self.receiver_id = 1
        self.shooter_id = 0
        self.ball_grad_scale = np.linalg.norm([self.field.width/2,
                                               self.field.length/2])/4
        # Max energy if max wheel speed is sent on every step of an episode
        wheel_max_rad_s = 160
        max_steps = 1200
        self.energy_scale = ((wheel_max_rad_s * 4) * max_steps)
        # Limit robot speeds
        self.max_v = 2.5
        self.max_w = 10
        self.max_kick_x = 5.0
        print('Environment initialized')

    def get_rotated_obs(self):
        """Return one robot-feature list per agent, own robot first."""
        robots_dict = dict()
        for i in range(self.n_robots_blue):
            robots_dict[i] = list()
            robots_dict[i].append(self.norm_pos(self.frame.robots_blue[i].x))
            robots_dict[i].append(self.norm_pos(self.frame.robots_blue[i].y))
            robots_dict[i].append(
                np.sin(np.deg2rad(self.frame.robots_blue[i].theta))
            )
            robots_dict[i].append(
                np.cos(np.deg2rad(self.frame.robots_blue[i].theta))
            )
            robots_dict[i].append(self.norm_v(self.frame.robots_blue[i].v_x))
            robots_dict[i].append(self.norm_v(self.frame.robots_blue[i].v_y))
            robots_dict[i].append(self.norm_w(
                self.frame.robots_blue[i].v_theta))
            robots_dict[i].append(
                1 if self.frame.robots_blue[i].infrared else 0)
            # Role flag: 1 for the current shooter
            robots_dict[i].append(1 if i == self.shooter_id else 0)
        rotaded_obs = list()
        for i in range(self.n_robots_blue):
            # Own robot first, then teammates in index order
            aux_dict = {}
            aux_dict.update(robots_dict)
            rotated = list()
            rotated = rotated + aux_dict.pop(i)
            teammates = list(aux_dict.values())
            for teammate in teammates:
                rotated = rotated + teammate
            rotaded_obs.append(rotated)
        return rotaded_obs

    def _frame_to_observations(self):
        """Build one normalized observation row per agent."""
        observations = list()
        robots = self.get_rotated_obs()
        for idx in range(self.n_robots_blue):
            observation = []
            observation.append(self.norm_pos(self.frame.ball.x))
            observation.append(self.norm_pos(self.frame.ball.y))
            observation.append(self.norm_v(self.frame.ball.v_x))
            observation.append(self.norm_v(self.frame.ball.v_y))
            observation += robots[idx]
            observations.append(np.array(observation, dtype=np.float32))
        observations = np.array(observations)
        return observations

    def reset(self):
        """Reset episode, role assignment and shaping accumulators."""
        self.reward_shaping_total = None
        state = super().reset()
        self.stopped_steps = 0
        self.shooter_id, self.receiver_id = 0, 1
        return state

    def step(self, action):
        """Step the base env, exposing the shaping breakdown as info."""
        observation, reward, done, _ = super().step(action)
        return observation, reward, done, self.reward_shaping_total

    def _get_commands(self, actions):
        """Translate per-robot actions into commands (kick has a deadzone)."""
        commands = []
        # Apply a +-0.5 deadzone to every robot's kick action (the
        # original hard-coded robots 0 and 1)
        for i in range(self.n_robots_blue):
            actions[i][3] = actions[i][3] if abs(actions[i][3]) > 0.5 else 0
        for i in range(self.n_robots_blue):
            angle = self.frame.robots_blue[i].theta
            v_x, v_y, v_theta = self.convert_actions(actions[i], np.deg2rad(angle))
            cmd = Robot(yellow=False, id=i, v_x=v_x, v_y=v_y, v_theta=v_theta,
                        kick_v_x=actions[i][3] * self.max_kick_x,
                        dribbler=True if actions[i][4] > 0 else False)
            commands.append(cmd)
        return commands

    def convert_actions(self, action, angle):
        """Denormalize, convert to the robot local frame and clip speed."""
        # Denormalize
        v_x = action[0] * self.max_v
        v_y = action[1] * self.max_v
        v_theta = action[2] * self.max_w
        # Convert to local
        v_x, v_y = v_x*np.cos(angle) + v_y*np.sin(angle),\
            -v_x*np.sin(angle) + v_y*np.cos(angle)
        # Clip the linear speed norm to max_v (explicit conditional in
        # place of the previous bool/float `or` coercion trick)
        v_norm = np.linalg.norm([v_x, v_y])
        scale = 1.0 if v_norm < self.max_v else self.max_v / v_norm
        return v_x * scale, v_y * scale, v_theta

    def _calculate_reward_and_done(self):
        """Compute per-robot rewards, termination and shaping stats."""
        w_ball_grad = 1/self.ball_grad_scale
        w_energy = 1/self.energy_scale
        reward = {f'robot_{i}': 0 for i in range(self.n_robots_blue)}
        done = False
        if self.reward_shaping_total is None:
            self.reward_shaping_total = {'n_passes': 0,
                                         'ball_grad': 0}
            for i in range(self.n_robots_blue):
                self.reward_shaping_total[f'robot_{i}'] = {'energy': 0}
        if self.frame.robots_blue[self.receiver_id].infrared:
            # Completed pass: reward both agents and swap the roles
            for i in range(self.n_robots_blue):
                reward[f'robot_{i}'] = 10
            self.reward_shaping_total['n_passes'] += 1
            self.stopped_steps = 0
            self.shooter_id, self.receiver_id = self.receiver_id, self.shooter_id
        else:
            rw_ball_grad = w_ball_grad * self.__ball_grad_rw()
            reward[f'robot_{self.shooter_id}'] += rw_ball_grad
            reward[f'robot_{self.receiver_id}'] += rw_ball_grad
            self.reward_shaping_total['ball_grad'] += rw_ball_grad
            for i in range(self.n_robots_blue):
                # Energy is a penalty: negated to match the sibling envs
                # (it was previously added positive, rewarding wasted energy)
                rw_energy = -w_energy*self.__energy_pen(i)
                reward[f'robot_{i}'] += rw_energy
                self.reward_shaping_total[f'robot_{i}']['energy'] += rw_energy
        if self.__bad_state():
            for i in range(self.n_robots_blue):
                reward[f'robot_{i}'] = -1
            done = True
        return reward, done

    def _get_initial_positions_frame(self):
        '''Returns the position of each robot and ball for the initial frame'''
        pos_frame: Frame = Frame()
        def x(): return random.uniform(-2, 2)
        def y(): return random.uniform(1.5, -1.5)
        pos_frame.ball = Ball(x=x(), y=y())
        # Place the shooter right behind the ball, facing it
        factor = (pos_frame.ball.y/abs(pos_frame.ball.y))
        offset = 0.09*factor
        angle = 270 if factor > 0 else 90
        pos_frame.robots_blue[0] = Robot(
            x=pos_frame.ball.x, y=pos_frame.ball.y+offset, theta=angle
        )
        ball = np.array([pos_frame.ball.x,
                         pos_frame.ball.y])
        # Receiver at least 1.5 m away in x, mirrored y, facing the ball
        recv_x = x()
        while abs(recv_x - pos_frame.ball.x) < 1.5:
            recv_x = x()
        receiver = np.array([recv_x, -pos_frame.ball.y])
        vect = receiver - ball
        recv_angle = np.rad2deg(np.arctan2(vect[1], vect[0]) + np.pi)
        pos_frame.robots_blue[1] = Robot(x=receiver[0],
                                         y=receiver[1],
                                         theta=recv_angle)
        return pos_frame

    def __bad_state(self):
        """True when the episode must be aborted as a failed pass."""
        # Check if dist between robots > 1.5
        recv = np.array([self.frame.robots_blue[self.receiver_id].x,
                         self.frame.robots_blue[self.receiver_id].y])
        shooter = np.array([self.frame.robots_blue[self.shooter_id].x,
                            self.frame.robots_blue[self.shooter_id].y])
        min_dist = np.linalg.norm(recv - shooter) > 1.5
        # Check if ball is in this rectangle
        ball = np.array([self.frame.ball.x, self.frame.ball.y])
        last_ball = np.array([self.last_frame.ball.x, self.last_frame.ball.y])
        inside = -2 < ball[0] < 2 and -1.5 < ball[1] < 1.5
        # Check if ball is stopped for too long
        last_dist = np.linalg.norm(last_ball - recv)
        dist = np.linalg.norm(ball - recv)
        stopped = abs(last_dist - dist) < 0.01
        if stopped:
            self.stopped_steps += 1
        else:
            self.stopped_steps = 0
        return self.stopped_steps > 20 or not inside or not min_dist

    def __ball_grad_rw(self):
        """Ball-moved-towards-receiver shaping term, clipped to [-1, 1]."""
        assert(self.last_frame is not None)
        # Goal pos (the receiver robot)
        goal = np.array([self.frame.robots_blue[self.receiver_id].x,
                         self.frame.robots_blue[self.receiver_id].y])
        # Calculate previous ball dist
        last_ball = self.last_frame.ball
        ball = self.frame.ball
        last_ball_pos = np.array([last_ball.x, last_ball.y])
        last_ball_dist = np.linalg.norm(goal - last_ball_pos)
        # Calculate new ball dist
        ball_pos = np.array([ball.x, ball.y])
        ball_dist = np.linalg.norm(goal - ball_pos)
        ball_dist_rw = last_ball_dist - ball_dist
        return np.clip(ball_dist_rw, -1, 1)

    def __energy_pen(self, idx):
        """Sum of absolute wheel speeds sent by robot *idx*."""
        robot = self.frame.robots_blue[idx]
        # Sum of abs each wheel speed sent
        energy = abs(robot.v_wheel0)\
            + abs(robot.v_wheel1)\
            + abs(robot.v_wheel2)\
            + abs(robot.v_wheel3)
        return energy
import math
import random
from typing import Dict
import gym
import numpy as np
from rsoccer_gym.Entities import Ball, Frame, Robot
from rsoccer_gym.ssl.ssl_gym_base import SSLBaseEnv
class SSLPassEnduranceEnv(SSLBaseEnv):
    """The shooter robot must pass the ball to a static receiver robot

    Description:
        Robot 0 (shooter) starts behind the ball and can only rotate,
        kick and dribble; robot 1 (receiver) is held static with its
        dribbler on.  The pass succeeds when the receiver's infrared
        sensor triggers.
    Observation:
        Type: Box(4 + 6*n_robots_blue)
        Normalized Bounds to [-1.2, 1.2]
        Num             Observation normalized
        0->3            Ball [X, Y, V_x, V_y]
        +6*i            id i Blue [X, Y, sin(theta), cos(theta), V_theta, infrared]
    Actions:
        Type: Box(3,)
        Num     Action
        0       id 0 Blue Angular Speed (%)
        1       id 0 Blue Kick x Speed (%), with a +-0.5 deadzone
        2       id 0 Blue Dribbler (%) (true if % is positive)
    Reward:
        +1 on a successful pass, -1 on a wrong pass, plus a ball-gradient
        shaping term towards the receiver
    Starting State:
        Shooter behind the ball aiming at the receiver; receiver on the
        mirrored y position facing the ball
    Episode Termination:
        30 seconds (1200 steps) or wrong pass
    """
    # Class-level defaults.  NOTE(review): `actions` is a mutable class
    # attribute; it is reassigned in reset()/_get_commands() before use,
    # so instances do not actually share state through it.
    original_vec = None
    max_dist = 100
    actions = {}
    shooted = False

    def __init__(self):
        super().__init__(field_type=2, n_robots_blue=2,
                         n_robots_yellow=0, time_step=0.025)
        # Actions: [v_theta %, kick %, dribbler %] for the shooter only
        self.action_space = gym.spaces.Box(low=-1, high=1,
                                           shape=(3, ),
                                           dtype=np.float32)
        # Ball (4) + each blue robot (6, incl. infrared flag)
        n_obs = 4 + 6*self.n_robots_blue
        self.holding_steps = 0
        self.stopped_steps = 0
        self.recv_angle = 270
        self.observation_space = gym.spaces.Box(low=-self.NORM_BOUNDS,
                                                high=self.NORM_BOUNDS,
                                                shape=(n_obs,),
                                                dtype=np.float32)
        self.receiver_id = 1
        self.ball_grad_scale = np.linalg.norm([self.field.width/2,
                                               self.field.length/2])/4
        # Limit robot speeds
        self.max_v = 2.5
        self.max_w = 10
        self.max_kick_x = 5.0
        print('Environment initialized')

    def _frame_to_observations(self):
        """Build the normalized observation vector for the current frame."""
        observation = list()
        observation.append(self.norm_pos(self.frame.ball.x))
        observation.append(self.norm_pos(self.frame.ball.y))
        observation.append(self.norm_v(self.frame.ball.v_x))
        observation.append(self.norm_v(self.frame.ball.v_y))
        for i in range(self.n_robots_blue):
            observation.append(self.norm_pos(self.frame.robots_blue[i].x))
            observation.append(self.norm_pos(self.frame.robots_blue[i].y))
            observation.append(
                np.sin(np.deg2rad(self.frame.robots_blue[i].theta))
            )
            observation.append(
                np.cos(np.deg2rad(self.frame.robots_blue[i].theta))
            )
            observation.append(self.norm_w(self.frame.robots_blue[i].v_theta))
            # Infrared flag: 1 when the ball is touching the dribbler
            observation.append(1 if self.frame.robots_blue[i].infrared else 0)
        return np.array(observation, dtype=np.float32)

    def reset(self):
        """Reset the episode and all per-episode counters and flags."""
        self.reward_shaping_total = None
        state = super().reset()
        self.actions = {}
        self.holding_steps = 0
        self.stopped_steps = 0
        self.shooted = False
        return state

    def step(self, action):
        """Step the base env, exposing the shaping breakdown as info."""
        observation, reward, done, _ = super().step(action)
        return observation, reward, done, self.reward_shaping_total

    def _get_commands(self, actions):
        """Command the shooter from *actions*; the receiver stays static."""
        commands = []
        # +-0.5 deadzone on the kick action
        actions[1] = actions[1] if abs(actions[1]) > 0.5 else 0
        self.actions = actions
        # Shooter: rotation, kick and dribbler only (no translation)
        cmd = Robot(yellow=False, id=0, v_x=0,
                    v_y=0, v_theta=actions[0] * self.max_w,
                    kick_v_x=actions[1] * self.max_kick_x,
                    dribbler=True if actions[2] > 0 else False)
        commands.append(cmd)
        # Receiver: fully static, dribbler always on to catch the pass
        cmd = Robot(yellow=False, id=1, v_x=0,
                    v_y=0, v_theta=0,
                    kick_v_x=0,
                    dribbler=True)
        commands.append(cmd)
        return commands

    def _calculate_reward_and_done(self):
        """Compute the step reward, termination flag and shaping stats."""
        w_ball_grad = 1/self.ball_grad_scale
        reward = 0
        done = False
        if self.reward_shaping_total is None:
            self.reward_shaping_total = {'reversed_dist': 0,
                                         'ball_grad': 0}
        if self.frame.robots_blue[1].infrared:
            # Successful pass: receiver is touching the ball
            reward += 1
            done = True
        else:
            rw_ball_grad = w_ball_grad * self.__ball_grad_rw()
            reward = rw_ball_grad
            self.reward_shaping_total['ball_grad'] += rw_ball_grad
            # NOTE(review): holding_steps is only ever reset here-abouts,
            # never incremented in this class — confirm who updates it
            if self.__wrong_ball() or self.holding_steps > 15:
                reward -= 1
                done = True
        if done:
            # Log how far along the shooter->receiver segment the ball got
            ball = np.array([self.frame.ball.x, self.frame.ball.y])
            recv = np.array([self.frame.robots_blue[1].x,
                             self.frame.robots_blue[1].y])
            shooter = np.array([self.frame.robots_blue[0].x,
                                self.frame.robots_blue[0].y])
            dist_robs = np.linalg.norm(recv - shooter)
            dist_ball = np.linalg.norm(recv - ball)
            reversed_dist = dist_robs - dist_ball
            normed = reversed_dist/dist_robs
            self.reward_shaping_total['reversed_dist'] = normed
        return reward, done

    def _get_initial_positions_frame(self):
        '''Returns the position of each robot and ball for the initial frame'''
        pos_frame: Frame = Frame()
        def x(): return random.uniform(-1.5, 1.5)
        def y(): return random.uniform(1.5, -1.5)
        pos_frame.ball = Ball(x=x(), y=y())
        # Place the shooter right behind the ball, facing it
        factor = (pos_frame.ball.y/abs(pos_frame.ball.y))
        offset = 0.115*factor
        angle = 270 if factor > 0 else 90
        pos_frame.robots_blue[0] = Robot(
            x=pos_frame.ball.x, y=pos_frame.ball.y+offset, theta=angle
        )
        shooter = np.array([pos_frame.robots_blue[0].x,
                            pos_frame.robots_blue[0].y])
        # Receiver at least 1 m away in x, mirrored y, facing the shooter
        recv_x = x()
        while abs(recv_x - pos_frame.ball.x) < 1:
            recv_x = x()
        receiver = np.array([recv_x, -pos_frame.ball.y])
        vect = receiver - shooter
        recv_angle = np.rad2deg(np.arctan2(vect[1], vect[0]) + np.pi)
        pos_frame.robots_blue[1] = Robot(x=receiver[0],
                                         y=receiver[1],
                                         theta=recv_angle)
        return pos_frame

    def __wrong_ball(self):
        """True when the ball left the pass corridor or stalled too long.

        NOTE(review): reads ``self.last_frame`` without a None guard —
        confirm the base env always sets it before rewards are computed.
        """
        ball = np.array([self.frame.ball.x, self.frame.ball.y])
        last_ball = np.array([self.last_frame.ball.x, self.last_frame.ball.y])
        recv = np.array([self.frame.robots_blue[1].x,
                         self.frame.robots_blue[1].y])
        shooter = np.array([self.frame.robots_blue[0].x,
                            self.frame.robots_blue[0].y])
        # Compare positions in whole centimeters to absorb jitter
        comp_ball = np.array(ball*100, dtype=int)
        comp_shoot = np.array(shooter*100, dtype=int)
        comp_recv = np.array(recv*100, dtype=int)
        # Ball must stay inside the axis-aligned shooter-receiver rectangle
        inside_x = min(comp_recv[0], comp_shoot[0]) <= comp_ball[0] <= max(
            comp_recv[0], comp_shoot[0])
        inside_y = min(comp_recv[1], comp_shoot[1]) <= comp_ball[1] <= max(
            comp_recv[1], comp_shoot[1])
        not_inside = not(inside_x and inside_y)
        # Count steps in which the ball makes no progress toward the receiver
        last_dist = np.linalg.norm(last_ball - recv)
        dist = np.linalg.norm(ball - recv)
        stopped = abs(last_dist - dist) < 0.01
        if stopped:
            self.stopped_steps += 1
        else:
            self.stopped_steps = 0
        return self.stopped_steps > 20 or not_inside

    def __ball_grad_rw(self):
        """Ball-moved-towards-receiver shaping term, clipped to [-1, 1]."""
        assert(self.last_frame is not None)
        # Goal pos (the receiver robot)
        goal = np.array([self.frame.robots_blue[1].x,
                         self.frame.robots_blue[1].y])
        # Calculate previous ball dist
        last_ball = self.last_frame.ball
        ball = self.frame.ball
        last_ball_pos = np.array([last_ball.x, last_ball.y])
        last_ball_dist = np.linalg.norm(goal - last_ball_pos)
        # Calculate new ball dist
        ball_pos = np.array([ball.x, ball.y])
        ball_dist = np.linalg.norm(goal - ball_pos)
        ball_dist_rw = last_ball_dist - ball_dist
        return np.clip(ball_dist_rw, -1, 1)
import math
import random
from typing import Dict
import gym
import numpy as np
from rsoccer_gym.Entities import Frame, Robot, Ball
from rsoccer_gym.ssl.ssl_gym_base import SSLBaseEnv
class SSLGoToBallShootEnv(SSLBaseEnv):
"""The SSL robot needs to make a goal
Description:
One blue robot and a ball are placed on fixed position on a half
div B field, the robot is rewarded if it makes a goal
Observation:
        Type: Box(4 + 8*n_robots_blue + 2*n_robots_yellow)
        Normalized Bounds to [-1.2, 1.2]
        Num             Observation normalized
        0->3            Ball [X, Y, V_x, V_y]
        4->11           id 0 Blue [X, Y, sin(theta), cos(theta), v_x, v_y, v_theta, infrared]
        +2*i            id i Yellow Robot [X, Y]
Actions:
Type: Box(5, )
Num Action
0 id 0 Blue Global X Direction Speed (%)
1 id 0 Blue Global Y Direction Speed (%)
2 id 0 Blue Angular Speed (%)
3 id 0 Blue Kick x Speed (%)
4 id 0 Blue Dribbler (%) (true if % is positive)
Reward:
1 if goal
Starting State:
Robot and ball on half opponent field size in different y.
Episode Termination:
Goal, ball leaves bounds or 60 seconds (2400 steps)
"""
def __init__(self, field_type=1, random_init=False, enter_goal_area=False):
super().__init__(field_type=field_type, n_robots_blue=1,
n_robots_yellow=0, time_step=0.025)
self.random_init = random_init
self.enter_goal_area= enter_goal_area
self.action_space = gym.spaces.Box(low=-1, high=1,
shape=(5, ), dtype=np.float32)
n_obs = 4 + 8*self.n_robots_blue + 2*self.n_robots_yellow
self.observation_space = gym.spaces.Box(low=-self.NORM_BOUNDS,
high=self.NORM_BOUNDS,
shape=(n_obs, ),
dtype=np.float32)
# scale max dist rw to 1 Considering that max possible move rw if ball and robot are in opposite corners of field
self.ball_dist_scale = np.linalg.norm([self.field.width, self.field.length/2])
self.ball_grad_scale = np.linalg.norm([self.field.width/2, self.field.length/2])/4
# scale max energy rw to 1 Considering that max possible energy if max robot wheel speed sent every step
wheel_max_rad_s = 160
max_steps = 1200
self.energy_scale = ((wheel_max_rad_s * 4) * max_steps)
# Limit robot speeds
self.max_v = 2.5
self.max_w = 10
self.kick_speed_x = 5.0
print('Environment initialized')
def reset(self):
self.reward_shaping_total = None
return super().reset()
def step(self, action):
observation, reward, done, _ = super().step(action)
return observation, reward, done, self.reward_shaping_total
def _frame_to_observations(self):
observation = []
observation.append(self.norm_pos(self.frame.ball.x))
observation.append(self.norm_pos(self.frame.ball.y))
observation.append(self.norm_v(self.frame.ball.v_x))
observation.append(self.norm_v(self.frame.ball.v_y))
for i in range(self.n_robots_blue):
observation.append(self.norm_pos(self.frame.robots_blue[i].x))
observation.append(self.norm_pos(self.frame.robots_blue[i].y))
observation.append(
np.sin(np.deg2rad(self.frame.robots_blue[i].theta))
)
observation.append(
np.cos(np.deg2rad(self.frame.robots_blue[i].theta))
)
observation.append(self.norm_v(self.frame.robots_blue[i].v_x))
observation.append(self.norm_v(self.frame.robots_blue[i].v_y))
observation.append(self.norm_w(self.frame.robots_blue[i].v_theta))
observation.append(1 if self.frame.robots_blue[i].infrared else 0)
for i in range(self.n_robots_yellow):
observation.append(self.norm_pos(self.frame.robots_yellow[i].x))
observation.append(self.norm_pos(self.frame.robots_yellow[i].y))
return np.array(observation, dtype=np.float32)
def _get_commands(self, actions):
commands = []
angle = self.frame.robots_blue[0].theta
v_x, v_y, v_theta = self.convert_actions(actions, np.deg2rad(angle))
cmd = Robot(yellow=False, id=0, v_x=v_x, v_y=v_y, v_theta=v_theta,
kick_v_x=self.kick_speed_x if actions[3] > 0 else 0.,
dribbler=True if actions[4] > 0 else False)
commands.append(cmd)
return commands
def convert_actions(self, action, angle):
"""Denormalize, clip to absolute max and convert to local"""
# Denormalize
v_x = action[0] * self.max_v
v_y = action[1] * self.max_v
v_theta = action[2] * self.max_w
# Convert to local
v_x, v_y = v_x*np.cos(angle) + v_y*np.sin(angle),\
-v_x*np.sin(angle) + v_y*np.cos(angle)
# clip by max absolute
v_norm = np.linalg.norm([v_x,v_y])
c = v_norm < self.max_v or self.max_v / v_norm
v_x, v_y = v_x*c, v_y*c
return v_x, v_y, v_theta
def _calculate_reward_and_done(self):
if self.reward_shaping_total is None:
self.reward_shaping_total = {
'goal': 0,
'rbt_in_gk_area': 0,
'done_ball_out': 0,
'done_ball_out_right': 0,
'done_rbt_out': 0,
'ball_dist': 0,
'ball_grad': 0,
'energy': 0
}
reward = 0
done = False
# Field parameters
half_len = self.field.length / 2
half_wid = self.field.width / 2
pen_len = self.field.penalty_length
half_pen_wid = self.field.penalty_width / 2
half_goal_wid = self.field.goal_width / 2
ball = self.frame.ball
robot = self.frame.robots_blue[0]
def robot_in_gk_area(rbt):
return rbt.x > half_len - pen_len and abs(rbt.y) < half_pen_wid
# Check if robot exited field right side limits
if robot.x < -0.2 or abs(robot.y) > half_wid:
done = True
self.reward_shaping_total['done_rbt_out'] += 1
# If flag is set, end episode if robot enter gk area
elif not self.enter_goal_area and robot_in_gk_area(robot):
done = True
self.reward_shaping_total['rbt_in_gk_area'] += 1
# Check ball for ending conditions
elif ball.x < 0 or abs(ball.y) > half_wid:
done = True
self.reward_shaping_total['done_ball_out'] += 1
elif ball.x > half_len:
done = True
if abs(ball.y) < half_goal_wid:
reward = 5
self.reward_shaping_total['goal'] += 1
else:
reward = 0
self.reward_shaping_total['done_ball_out_right'] += 1
elif self.last_frame is not None:
ball_dist_rw = self.__ball_dist_rw() / self.ball_dist_scale
self.reward_shaping_total['ball_dist'] += ball_dist_rw
ball_grad_rw = self.__ball_grad_rw() / self.ball_grad_scale
self.reward_shaping_total['ball_grad'] += ball_grad_rw
energy_rw = -self.__energy_pen() / self.energy_scale
self.reward_shaping_total['energy'] += energy_rw
reward = reward\
+ ball_dist_rw\
+ ball_grad_rw\
+ energy_rw
done = done
return reward, done
def _get_initial_positions_frame(self):
'''Returns the position of each robot and ball for the initial frame'''
if self.random_init:
half_len = self.field.length / 2
half_wid = self.field.width / 2
penalty_len = self.field.penalty_length
def x(): return random.uniform(0.3, half_len - penalty_len - 0.3)
def y(): return random.uniform(-half_wid + 0.1, half_wid - 0.1)
def theta(): return random.uniform(0, 360)
else:
def x(): return self.field.length / 4
def y(): return self.field.width / 8
def theta(): return 0
pos_frame: Frame = Frame()
def same_position_ref(obj, ref, dist):
if abs(obj.x - ref.x) < dist and abs(obj.y - ref.y) < dist:
return True
return False
pos_frame.ball = Ball(x=x(), y=y())
d_ball_rbt = (self.field.ball_radius + self.field.rbt_radius) * 1.1
pos_frame.robots_blue[0] = Robot(x=x(), y=-y(), theta=theta())
while same_position_ref(pos_frame.robots_blue[0],pos_frame.ball, d_ball_rbt):
pos_frame.robots_blue[0] = Robot(x=x(), y=y(), theta=theta())
return pos_frame
def __ball_dist_rw(self):
assert(self.last_frame is not None)
# Calculate previous ball dist
last_ball = self.last_frame.ball
last_robot = self.last_frame.robots_blue[0]
last_ball_pos = np.array([last_ball.x, last_ball.y])
last_robot_pos = np.array([last_robot.x, last_robot.y])
last_ball_dist = np.linalg.norm(last_robot_pos - last_ball_pos)
# Calculate new ball dist
ball = self.frame.ball
robot = self.frame.robots_blue[0]
ball_pos = np.array([ball.x, ball.y])
robot_pos = np.array([robot.x, robot.y])
ball_dist = np.linalg.norm(robot_pos - ball_pos)
ball_dist_rw = last_ball_dist - ball_dist
if ball_dist_rw > 1:
print("ball_dist -> ", ball_dist_rw)
print(self.frame.ball)
print(self.frame.robots_blue)
print(self.frame.robots_yellow)
print("===============================")
return np.clip(ball_dist_rw, -1, 1)
def __ball_grad_rw(self):
assert(self.last_frame is not None)
# Goal pos
goal = np.array([self.field.length/2, 0.])
# Calculate previous ball dist
last_ball = self.last_frame.ball
ball = self.frame.ball
last_ball_pos = np.array([last_ball.x, last_ball.y])
last_ball_dist = np.linalg.norm(goal - last_ball_pos)
# Calculate new ball dist
ball_pos = np.array([ball.x, ball.y])
ball_dist = np.linalg.norm(goal - ball_pos)
ball_dist_rw = last_ball_dist - ball_dist
if ball_dist_rw > 1:
print("ball_dist -> ", ball_dist_rw)
print(self.frame.ball)
print(self.frame.robots_blue)
print(self.frame.robots_yellow)
print("===============================")
return np.clip(ball_dist_rw, -1, 1)
def __energy_pen(self):
robot = self.frame.robots_blue[0]
# Sum of abs each wheel speed sent
energy = abs(robot.v_wheel0)\
+ abs(robot.v_wheel1)\
+ abs(robot.v_wheel2)\
+ abs(robot.v_wheel3)
return energy | /rsoccer_gym-1.4-py3-none-any.whl/rsoccer_gym/ssl/ssl_go_to_ball_shoot/ssl_gym_go_to_ball_shoot.py | 0.673084 | 0.476884 | ssl_gym_go_to_ball_shoot.py | pypi |
import numpy as np
import robosim
from typing import Dict, List
from rsoccer_gym.Entities import Frame, FrameVSS, FrameSSL, Field
class RSim:
    """Base wrapper around a robosim simulator instance.

    Subclasses provide the concrete simulator (``_init_simulator``) and
    the command/frame codecs (``send_commands`` / ``get_frame``).
    """

    def __init__(
        self,
        field_type: int,
        n_robots_blue: int,
        n_robots_yellow: int,
        time_step_ms: int,
    ):
        self.n_robots_blue = n_robots_blue
        self.n_robots_yellow = n_robots_yellow

        # Dummy placement, only needed to boot the simulator: robots lined
        # up on each side of the center, ball at rest in the middle.
        boot_ball = [0, 0, 0, 0]
        boot_blue = [[-0.2 * (i + 1), 0, 0] for i in range(n_robots_blue)]
        boot_yellow = [[0.2 * (i + 1), 0, 0] for i in range(n_robots_yellow)]

        self.simulator = self._init_simulator(
            field_type=field_type,
            n_robots_blue=n_robots_blue,
            n_robots_yellow=n_robots_yellow,
            ball_pos=boot_ball,
            blue_robots_pos=boot_blue,
            yellow_robots_pos=boot_yellow,
            time_step_ms=time_step_ms,
        )
        self.field = self.get_field_params()

    def reset(self, frame: Frame):
        """Teleport ball and robots to the poses described by ``frame``."""
        placement = self._placement_dict_from_frame(frame)
        self.simulator.reset(
            placement["ball_pos"],
            placement["blue_robots_pos"],
            placement["yellow_robots_pos"],
        )

    def stop(self):
        """Release the underlying simulator."""
        del self.simulator

    def send_commands(self, commands):
        raise NotImplementedError

    def get_frame(self) -> Frame:
        raise NotImplementedError

    def get_field_params(self):
        """Fetch the field geometry from the simulator as a ``Field``."""
        return Field(**self.simulator.get_field_params())

    def _placement_dict_from_frame(self, frame: Frame):
        """Convert a ``Frame`` into the positional arrays robosim expects."""
        ball = [frame.ball.x, frame.ball.y, frame.ball.v_x, frame.ball.v_y]
        blue = [[r.x, r.y, r.theta] for r in frame.robots_blue.values()]
        yellow = [[r.x, r.y, r.theta] for r in frame.robots_yellow.values()]
        return {
            "ball_pos": np.array(ball),
            "blue_robots_pos": np.array(blue),
            "yellow_robots_pos": np.array(yellow),
        }

    def _init_simulator(
        self,
        field_type,
        n_robots_blue,
        n_robots_yellow,
        ball_pos,
        blue_robots_pos,
        yellow_robots_pos,
        time_step_ms,
    ):
        raise NotImplementedError
class RSimVSS(RSim):
    """VSS (differential drive, two wheels) flavour of the robosim wrapper."""

    def send_commands(self, commands):
        """Pack robot commands into the (n_robots, 2) wheel-speed matrix."""
        total = self.n_robots_blue + self.n_robots_yellow
        packed = np.zeros((total, 2), dtype=np.float64)
        for command in commands:
            # Yellow robots occupy the rows after all blue robots.
            row = command.id + (self.n_robots_blue if command.yellow else 0)
            packed[row][0] = command.v_wheel0
            packed[row][1] = command.v_wheel1
        self.simulator.step(packed)

    def get_frame(self) -> FrameVSS:
        """Read the simulator state into a fresh ``FrameVSS``."""
        frame = FrameVSS()
        frame.parse(self.simulator.get_state(),
                    self.n_robots_blue, self.n_robots_yellow)
        return frame

    def _init_simulator(self, field_type, n_robots_blue, n_robots_yellow,
                        ball_pos, blue_robots_pos, yellow_robots_pos,
                        time_step_ms):
        return robosim.VSS(
            field_type,
            n_robots_blue,
            n_robots_yellow,
            time_step_ms,
            ball_pos,
            blue_robots_pos,
            yellow_robots_pos,
        )
class RSimSSL(RSim):
    """SSL (omnidirectional, four wheels) flavour of the robosim wrapper."""

    def send_commands(self, commands):
        """Pack robot commands into the (n_robots, 8) command matrix.

        Column 0 selects the mode (truthy = per-wheel speeds); columns
        1-4 carry wheel speeds or body velocities, columns 5-7 carry
        kick and dribbler values in both modes.
        """
        total = self.n_robots_blue + self.n_robots_yellow
        packed = np.zeros((total, 8), dtype=np.float64)
        for command in commands:
            # Yellow robots occupy the rows after all blue robots.
            row = command.id + (self.n_robots_blue if command.yellow else 0)
            packed[row][0] = command.wheel_speed
            if command.wheel_speed:
                # Per-wheel speed mode.
                packed[row][1] = command.v_wheel0
                packed[row][2] = command.v_wheel1
                packed[row][3] = command.v_wheel2
                packed[row][4] = command.v_wheel3
            else:
                # Body-velocity mode; column 4 stays zero.
                packed[row][1] = command.v_x
                packed[row][2] = command.v_y
                packed[row][3] = command.v_theta
            packed[row][5] = command.kick_v_x
            packed[row][6] = command.kick_v_z
            packed[row][7] = command.dribbler
        self.simulator.step(packed)

    def get_frame(self) -> FrameSSL:
        """Read the simulator state into a fresh ``FrameSSL``."""
        frame = FrameSSL()
        frame.parse(self.simulator.get_state(),
                    self.n_robots_blue, self.n_robots_yellow)
        return frame

    def _init_simulator(self, field_type, n_robots_blue, n_robots_yellow,
                        ball_pos, blue_robots_pos, yellow_robots_pos,
                        time_step_ms):
        return robosim.SSL(
            field_type,
            n_robots_blue,
            n_robots_yellow,
            time_step_ms,
            ball_pos,
            blue_robots_pos,
            yellow_robots_pos,
        )
import socket
from typing import Dict, List
import numpy as np
from rsoccer_gym.Entities import Robot
from rsoccer_gym.Entities.Frame import FramePB
from rsoccer_gym.Simulators.rsim import RSim
import rsoccer_gym.Simulators.pb_fira.packet_pb2 as packet_pb2
from rsoccer_gym.Simulators.pb_fira.state_pb2 import *
class Fira(RSim):
    """Client for the FIRASim simulator: receives vision frames over UDP
    multicast and sends commands/replacements over plain UDP."""

    def __init__(
        self,
        vision_ip="224.0.0.1",
        vision_port=10002,
        cmd_ip="127.0.0.1",
        cmd_port=20011,
    ):
        """
        Init SSLClient object.

        Parameters
        ----------
        vision_ip : str
            Multicast group IP for incoming vision packets,
            in format '255.255.255.255'.
        vision_port : int
            UDP port of the vision multicast group.
        cmd_ip : str
            IP where FIRASim listens for command packets.
        cmd_port : int
            UDP port of the FIRASim command listener.
        """
        self.vision_ip = vision_ip
        self.vision_port = vision_port
        self.com_ip = cmd_ip
        self.com_port = cmd_port
        # Plain UDP socket for outgoing commands/replacements.
        self.com_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.com_address = (self.com_ip, self.com_port)
        # Multicast UDP socket for incoming vision frames.
        self.vision_sock = socket.socket(
            socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP
        )
        self.vision_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.vision_sock.setsockopt(
            socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 128
        )
        self.vision_sock.setsockopt(
            socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1
        )
        self.vision_sock.bind((self.vision_ip, self.vision_port))
        self.linear_speed_range = 1.15
        self.robot_wheel_radius = 0.026

    def get_field_params(self):
        # NOTE(review): unlike RSim.get_field_params this returns a plain
        # dict of hard-coded 3v3 VSS dimensions, not a Field object --
        # confirm callers expect a dict here.
        return {'field_width': 1.3, 'field_length': 1.5,
                'penalty_width': 0.7, 'penalty_length': 0.15,
                'goal_width': 0.4, 'goal_depth': 0.1}

    def stop(self):
        # Nothing to tear down: the remote simulator owns its lifecycle.
        pass

    def reset(self, frame: FramePB):
        """Send a replacement packet placing ball and robots as in ``frame``."""
        placement_pos = self._placement_dict_from_frame(frame)

        pkt = packet_pb2.Packet()

        # Ball replacement uses position only (velocity is dropped).
        ball_pos = placement_pos["ball_pos"][:2]
        ball_pkt = pkt.replace.ball
        ball_pkt.x = ball_pos[0]
        ball_pkt.y = ball_pos[1]

        robots_pkt = pkt.replace.robots
        for i, robot in enumerate(placement_pos["blue_robots_pos"]):
            rep_rob = robots_pkt.add()
            # NOTE(review): ids are sent as i+1 (1-based) -- confirm this
            # matches FIRASim's robot numbering.
            rep_rob.position.robot_id = i+1
            rep_rob.position.x = robot[0]
            rep_rob.position.y = robot[1]
            rep_rob.position.orientation = robot[2]
            rep_rob.yellowteam = False
            rep_rob.turnon = True

        for i, robot in enumerate(placement_pos["yellow_robots_pos"]):
            rep_rob = robots_pkt.add()
            rep_rob.position.robot_id = i+1
            rep_rob.position.x = robot[0]
            rep_rob.position.y = robot[1]
            rep_rob.position.orientation = robot[2]
            rep_rob.yellowteam = True
            rep_rob.turnon = True

        # send commands
        data = pkt.SerializeToString()
        self.com_socket.sendto(data, self.com_address)

    def get_frame(self):
        """Receive package and decode."""
        data, _ = self.vision_sock.recvfrom(1024)
        decoded_data = packet_pb2.Environment().FromString(data)
        frame = FramePB()
        frame.parse(decoded_data)

        return frame

    def send_commands(self, commands):
        """Encode and send wheel-speed commands for each robot."""
        # prepare commands
        pkt = packet_pb2.Packet()
        d = pkt.cmd.robot_commands

        # send wheel speed commands for each robot
        for cmd in commands:
            robot = d.add()
            robot.id = cmd.id
            robot.yellowteam = cmd.yellow
            # convert from linear speed (m/s) to angular speed (rad/s)
            robot.wheel_left = cmd.v_wheel1 / self.robot_wheel_radius
            robot.wheel_right = cmd.v_wheel2 / self.robot_wheel_radius

        # send commands
        data = pkt.SerializeToString()
        self.com_socket.sendto(data, self.com_address)

    def _placement_dict_from_frame(self, frame: FramePB):
        """Convert a frame into the positional arrays used by ``reset``."""
        replacement_pos: Dict[str, np.ndarray] = {}

        ball_pos: List[float] = [
            frame.ball.x,
            frame.ball.y,
            frame.ball.v_x,
            frame.ball.v_y,
        ]
        replacement_pos["ball_pos"] = np.array(ball_pos)

        blue_pos: List[List[float]] = []
        for robot in frame.robots_blue.values():
            robot_pos: List[float] = [robot.x, robot.y, robot.theta]
            blue_pos.append(robot_pos)
        replacement_pos["blue_robots_pos"] = np.array(blue_pos)

        yellow_pos: List[List[float]] = []
        for robot in frame.robots_yellow.values():
            robot_pos: List[float] = [robot.x, robot.y, robot.theta]
            yellow_pos.append(robot_pos)
        replacement_pos["yellow_robots_pos"] = np.array(yellow_pos)

        return replacement_pos
from abc import abstractmethod
from rsocket.wellknown_mimetype import WellKnowMimeTypes, MIME_TYPES_BY_NAME
class CompositeMetadata:
    """Encoder/decoder for RSocket composite metadata.

    Each entry is either a well-known mime id (one byte with the high
    bit set) or a custom mime type (one length byte with the high bit
    clear, followed by the ASCII mime name), then a 24-bit big-endian
    metadata length and the metadata bytes. Iterating the object yields
    ``ReservedMimeTypeEntry`` / ``ExplicitMimeTimeEntry`` instances.
    """

    def __init__(self, source=None):
        if source is None:
            source = bytearray()
        self.source = source
        self.reader_index = 0

    def get_source(self):
        """composite metadata bytes for Payload"""
        return self.source

    def add_wellknown_metadata(self, mime_type_id, metadata_bytes):
        """Append an entry whose mime type is a well-known id (0..127)."""
        # mime id flag: the high bit marks a well-known id
        self.source.append(mime_type_id | 0x80)
        # metadata length (24-bit big-endian)
        self.source += len(metadata_bytes).to_bytes(3, byteorder='big')
        # metadata bytes
        self.source += metadata_bytes

    def add_custom_metadata(self, mime_type, metadata_bytes):
        """Append an entry with an explicit mime type string.

        Falls back to the well-known encoding when ``mime_type`` has a
        registered id.
        """
        if mime_type in MIME_TYPES_BY_NAME:
            self.add_wellknown_metadata(MIME_TYPES_BY_NAME[mime_type], metadata_bytes)
        else:
            mime_type_len = len(mime_type)
            # mime length byte (high bit clear marks a custom mime type)
            # NOTE(review): the RSocket spec encodes this as length - 1;
            # the raw length is kept here for symmetry with __next__ --
            # confirm interop requirements before changing.
            self.source.append(mime_type_len)
            # mime type name
            self.source += mime_type.encode("ascii")
            # metadata length (24-bit big-endian)
            # BUGFIX: previously wrote the mime-type length here instead
            # of the metadata length, corrupting every custom entry.
            self.source += len(metadata_bytes).to_bytes(3, byteorder='big')
            # metadata bytes
            self.source += metadata_bytes

    def rewind(self):
        """Reset iteration back to the first entry."""
        self.reader_index = 0

    def __iter__(self):
        return self

    def __next__(self):
        if len(self.source) <= self.reader_index:
            raise StopIteration
        mime_id = self.source[self.reader_index]
        # BUGFIX: well-known id 0 encodes as exactly 0x80, so the test
        # must be >=, not > (otherwise id 0 is misparsed as a custom
        # entry with a 128-byte mime name).
        if mime_id >= 0x80:
            # well-known mime type
            metadata_len_start = self.reader_index + 1
            metadata_len = int.from_bytes(
                self.source[metadata_len_start:metadata_len_start + 3],
                byteorder='big')
            metadata_bytes_start = metadata_len_start + 3
            metadata_bytes = self.source[
                metadata_bytes_start:metadata_bytes_start + metadata_len]
            self.reader_index = metadata_bytes_start + metadata_len
            return ReservedMimeTypeEntry(mime_id - 0x80, metadata_bytes)
        else:
            # custom mime type: length byte, name, then metadata
            mime_type_start = self.reader_index + 1
            mime_type_len = mime_id
            mime_type = self.source[
                mime_type_start:mime_type_start + mime_type_len].decode("ascii")
            metadata_len_start = mime_type_start + mime_type_len
            metadata_len = int.from_bytes(
                self.source[metadata_len_start:metadata_len_start + 3],
                byteorder='big')
            metadata_bytes_start = metadata_len_start + 3
            metadata_bytes = self.source[
                metadata_bytes_start:metadata_bytes_start + metadata_len]
            self.reader_index = metadata_bytes_start + metadata_len
            return ExplicitMimeTimeEntry(mime_type, metadata_bytes)
class CompositeMetadataEntry:
    """Abstract view of a single entry inside a composite metadata payload."""

    @abstractmethod
    def get_content(self):
        pass

    @abstractmethod
    def get_mime_type(self):
        pass


class ReservedMimeTypeEntry(CompositeMetadataEntry):
    """Entry whose mime type is one of the well-known reserved ids."""

    def __init__(self, identifier, content):
        # Resolve the numeric id to its registered mime type name.
        self.mime_type = WellKnowMimeTypes.from_identifier(identifier).name
        self.content = content

    def get_content(self):
        return self.content

    def get_mime_type(self):
        return self.mime_type


class ExplicitMimeTimeEntry(CompositeMetadataEntry):
    """Entry whose mime type is spelled out as an explicit string."""

    def __init__(self, mime_type, content):
        self.mime_type = mime_type
        self.content = content

    def get_content(self):
        return self.content

    def get_mime_type(self):
        return self.mime_type
class TaggingMetadata:
    """Encoder/decoder for RSocket tagging metadata (e.g. routing tags).

    Wire format: a sequence of entries, each one length byte followed by
    that many bytes of UTF-8 tag text. Iterating the object yields the
    decoded tag strings.
    """

    def __init__(self, mime_type, source=None):
        self.mime_type = mime_type
        if source is None:
            source = bytearray()
        self.source = source
        self.reader_index = 0

    def __iter__(self):
        return self

    def __next__(self):
        if len(self.source) <= self.reader_index:
            raise StopIteration
        tag_length = self.source[self.reader_index]
        tag_start = self.reader_index + 1
        tag = self.source[tag_start:tag_start + tag_length].decode("utf-8")
        # BUGFIX: advance exactly past this tag's bytes. The previous
        # computation (tag_start + tag_start + tag_length - 1) was only
        # coincidentally correct for the first tag (reader_index == 0),
        # so any subsequent tag was misparsed.
        self.reader_index = tag_start + tag_length
        return tag

    @classmethod
    def from_tags(cls, mime_type, tags):
        """Build a TaggingMetadata from an iterable of tag strings."""
        source = bytearray()
        for tag in tags:
            tag_bytes = bytes(tag, encoding='utf-8')
            # NOTE(review): masking with 0x7F silently truncates the
            # length byte for tags longer than 127 bytes -- confirm tags
            # are always short, or raise on overlong tags instead.
            source.append(len(tag_bytes) & 0x7F)
            source += tag_bytes
        return TaggingMetadata(mime_type, source)
from rsolve.hm import hm
from dataclasses import dataclass, field
import typing as t
Ext = t.TypeVar("Ext")
@dataclass
class TCEnv(t.Generic[Ext]):
    """Type-checking environment holding Hindley-Milner unification state.

    Attributes:
        ext: user-supplied extension payload carried through checking.
        tvars: store of type variables; ``tvars[i]`` is the current
            binding of variable ``i`` (the variable itself when unbound).
        neqs: set of disequality constraints, stored as ordered pairs.
    """

    ext: Ext
    tvars: t.List[hm.HMT] = field(default_factory=list)
    # BUGFIX: ``neqs`` is used as a set (``add_neq`` calls ``self.neqs.add``)
    # and is annotated as one, but the default factory was ``list``,
    # which would raise AttributeError on the first disequality.
    neqs: t.Set[t.Tuple[hm.HMT, hm.HMT]] = field(default_factory=set)

    def new_tvar(self) -> hm.TVar:
        """Allocate a fresh, unbound type variable."""
        i = len(self.tvars)
        var = hm.TVar(i)
        self.tvars.append(var)
        return var

    def load_tvar(self, i: int) -> hm.HMT:
        """Return the current binding of type variable ``i``."""
        return self.tvars[i]

    def occur_in(self, i: int, hmt: hm.HMT) -> bool:
        """Occurs check: does variable ``i`` occur (transitively) in ``hmt``?"""
        def contains(x: hm.HMT):
            if isinstance(x, hm.TApp):
                a = contains(x.fn)
                b = contains(x.arg)
                return a or b
            elif isinstance(x, hm.TArrow):
                a = contains(x.dom)
                b = contains(x.codom)
                return a or b
            elif isinstance(x, hm.TTup):
                elts = map(contains, x.elts)
                return any(elts)
            elif isinstance(x, hm.TForall):
                return contains(x.inst)
            elif isinstance(x, (hm.TNom, hm.TFresh)):
                return False
            else:
                assert isinstance(x, hm.TVar)
                if i == x.i:
                    return True
                # Follow the variable's binding, if any, and recurse.
                var = self.load_tvar(x.i)
                if var == x:
                    return False
                return contains(var)

        return contains(hmt)

    def free(self, fresh_map: t.Dict[str, hm.HMT], to_fresh: hm.HMT):
        """Substitute fresh (bound) names in ``to_fresh`` per ``fresh_map``."""
        def mk_free(x: hm.HMT):
            if isinstance(x, (hm.TNom, hm.TVar)):
                return x
            if isinstance(x, hm.TApp):
                return hm.TApp(mk_free(x.fn), mk_free(x.arg))
            if isinstance(x, hm.TArrow):
                return hm.TArrow(mk_free(x.dom), mk_free(x.codom))
            if isinstance(x, hm.TTup):
                return hm.TTup(tuple(map(mk_free, x.elts)))
            if isinstance(x, hm.TFresh):
                return fresh_map.get(x.s, x)
            assert isinstance(x, hm.TForall)
            # Inner quantifiers shadow outer substitutions of the same name.
            freshs = x.vars
            new_fresh_map = {
                k: v
                for k, v in fresh_map.items() if k not in freshs
            }
            return hm.TForall(freshs, self.free(new_fresh_map, x.inst))

        return mk_free(to_fresh)

    def prune(self, x: hm.HMT):
        """Resolve bound variables in ``x``, path-compressing ``tvars``."""
        if isinstance(x, (hm.TFresh, hm.TNom)):
            return x
        if isinstance(x, hm.TForall):
            return hm.TForall(x.vars, self.prune(x.inst))
        if isinstance(x, hm.TApp):
            return hm.TApp(self.prune(x.fn), self.prune(x.arg))
        if isinstance(x, hm.TArrow):
            return hm.TArrow(self.prune(x.dom), self.prune(x.codom))
        if isinstance(x, hm.TTup):
            return hm.TTup(tuple(map(self.prune, x.elts)))
        assert isinstance(x, hm.TVar)
        i = x.i
        v = self.load_tvar(i)
        if isinstance(v, hm.TVar) and v.i == i:
            # Unbound variable: nothing to resolve.
            return x
        # Path compression: store the fully pruned binding back.
        v = self.tvars[i] = self.prune(v)
        return v

    def add_neq(self, l: hm.HMT, r: hm.HMT):
        """Record a disequality constraint as a canonically ordered pair."""
        lr = tuple(sorted((l, r)))
        self.neqs.add(lr)

    def unify(self, unif: 'HMUnify'):
        """Process a (dis)unification constraint."""
        if not unif.is_pos:
            self.add_neq(unif.l, unif.r)
        else:
            self._unify_root(unif.l, unif.r)

    def _unify_root(self, l, r):
        """Unify two types after pruning; returns True on success."""
        l = self.prune(l)
        r = self.prune(r)

        def unify_rec(l, r):
            if isinstance(l, hm.TForall):
                # Instantiate the quantifier with fresh variables.
                free_map = {v: self.new_tvar() for v in l.vars}
                inst = self.free(free_map, l.inst)
                return self._unify_root(inst, r)
            if isinstance(r, hm.TForall):
                return unify_rec(r, l)
            if isinstance(l, hm.TNom) and isinstance(r, hm.TNom):
                return l.nom == r.nom
            if isinstance(l, hm.TVar) and isinstance(r, hm.TVar):
                if l.i == r.i:
                    return True
                if self.occur_in(l.i, r):
                    raise TypeError(
                        "Ill-formed type definition like a = a -> b")
                self.tvars[l.i] = r
                return True
            if isinstance(l, hm.TVar):
                self.tvars[l.i] = r
                return True
            if isinstance(r, hm.TVar):
                return unify_rec(r, l)
            if isinstance(l, hm.TApp) and isinstance(r, hm.TApp):
                return self._unify_root(l.fn, r.fn) and self._unify_root(
                    l.arg, r.arg)
            if isinstance(l, hm.TArrow) and isinstance(r, hm.TArrow):
                return self._unify_root(l.dom, r.dom) and self._unify_root(
                    l.codom, r.codom)
            if isinstance(l, hm.TTup) and isinstance(r, hm.TTup):
                if len(l.elts) != len(r.elts):
                    return False
                return all(
                    self._unify_root(a1, a2) for a1, a2 in zip(l.elts, r.elts))
            raise TypeError(f"{l} ? {r}")

        return unify_rec(l, r)
@dataclass(frozen=True, order=True)
class HMUnify:
    """A (dis)unification constraint between two HM types.

    ``is_pos`` selects between unification (True) and disequality (False).
    """

    l: hm.HMT
    r: hm.HMT
    is_pos: bool = True

    def not_a(self):
        """Return the negation of this constraint, as a singleton list."""
        flipped = HMUnify(self.l, self.r, is_pos=not self.is_pos)
        return [flipped]
<img src="https://github.com/XiongPengNUS/rsome/blob/master/rsologo.png?raw=true" width=100>
# RSOME: Robust Stochastic Optimization Made Easy
[](https://pypi.org/project/rsome/)
[](https://pypi.org/project/rsome/)
[](https://github.com/XiongPengNUS/rsome/graphs/commit-activity)
[](https://github.com/XiongPengNUS/rsome/graphs/commit-activity)
[](https://github.com/XiongPengNUS/rsome/actions/workflows/test.yml)
[](https://github.com/XiongPengNUS/rsome/actions/workflows/pages/pages-build-deployment)
[](http://www.repostatus.org/#active)


- Website: [RSOME for Python](https://xiongpengnus.github.io/rsome/)
- PyPI: [RSOME 1.2.1](https://pypi.org/project/rsome/)
RSOME (Robust Stochastic Optimization Made Easy) is an open-source Python package for generic modeling of optimization problems (subject to uncertainty). Models in RSOME are constructed by variables, constraints, and expressions that are formatted as N-dimensional arrays. These arrays are consistent with the NumPy library in terms of syntax and operations, including broadcasting, indexing, slicing, element-wise operations, and matrix calculation rules, among others. In short, RSOME provides a convenient platform to facilitate developments of robust optimization models and their applications.
## Content
- [Installation](#section2)
- [Solver interfaces](#section3)
- [Getting started](#section4)
- [Team](#section5)
- [Citation](#section6)
## Installation <a id="section2"></a>
The RSOME package can be installed by using the <code>pip</code> command:
***
**`pip install rsome`**
***
### Solver interfaces <a id="section3"></a>
The RSOME package transforms robust or distributionally robust optimization models into deterministic linear or conic programming problems, and solved by external solvers. Details of compatible solvers and their interfaces are presented in the following table.
| Solver | License type | Required version | RSOME interface | Second-order cone constraints| Exponential cone constraints | Semidefiniteness constraints
|:-------|:--------------|:-----------------|:----------------|:------------------------|:---------------------|:--------------|
|[scipy.optimize](https://docs.scipy.org/doc/scipy/reference/optimize.html)| Open-source | >= 1.9.0 | `lpg_solver` | No | No | No |
|[CyLP](https://github.com/coin-or/cylp)| Open-source | >= 0.9.0 | `clp_solver` | No | No | No |
|[OR-Tools](https://developers.google.com/optimization/install) | Open-source | >= 7.5.7466 | `ort_solver` | No | No | No |
|[ECOS](https://github.com/embotech/ecos-python) | Open-source | >= 2.0.10 | `eco_solver` | Yes | Yes | No |
|[Gurobi](https://www.gurobi.com/documentation/9.0/quickstart_mac/ins_the_anaconda_python_di.html)| Commercial | >= 9.1.0 | `grb_solver` | Yes | No | No |
|[Mosek](https://docs.mosek.com/9.2/pythonapi/install-interface.html) | Commercial | >= 10.0.44 | `msk_solver` | Yes | Yes | Yes |
|[CPLEX](https://www.ibm.com/support/knowledgecenter/en/SSSA5P_12.8.0/ilog.odms.cplex.help/CPLEX/GettingStarted/topics/set_up/Python_setup.html) | Commercial | >= 12.9.0.0 | `cpx_solver` | Yes | No | No |
|[COPT](https://www.shanshu.ai/copt) | Commercial | >= 6.5.3 | `cpt_solver` | Yes | No | No |
## Getting started <a id="section4"></a>
Documents of RSOME are provided as follows:
- [RSOME quick start](https://xiongpengnus.github.io/rsome/)
- [RSOME users guide](https://xiongpengnus.github.io/rsome/user_guide)
- [Application examples](https://xiongpengnus.github.io/rsome/examples)
## Team <a id="section5"></a>
RSOME is a software project supported by Singapore Ministry of Education Tier 3 Grant *Science of Prescriptive Analytics*. It is primarly developed and maintained by [Zhi Chen](https://www.cb.cityu.edu.hk/staff/zchen96/), [Melvyn Sim](https://bizfaculty.nus.edu.sg/faculty-details/?profId=127), and [Peng Xiong](https://bizfaculty.nus.edu.sg/faculty-details/?profId=543). Many other researchers, including Erick Delage, Zhaowei Hao, Long He, Zhenyu Hu, Jun Jiang, Brad Sturt, Qinshen Tang, as well as anonymous users and paper reviewers, have helped greatly in the way of developing RSOME.
## Citation <a id="section6">
If you use RSOME in your research, please cite our papers:
- Chen, Zhi, and Peng Xiong. 2023. [RSOME in Python: an open-source package for robust stochastic optimization made easy](https://pubsonline.informs.org/doi/abs/10.1287/ijoc.2023.1291). Forthcoming in <i>INFORMS Journal on Computing</i>.
- Chen, Zhi, Melvyn Sim, Peng Xiong. 2020. [Robust stochastic optimization made easy with RSOME](https://pubsonline.informs.org/doi/abs/10.1287/mnsc.2020.3603). <i>Management Science</i> <b>66</b>(8) 3329-3339.
Bibtex entry:
```
@article{chen2021rsome,
title={RSOME in Python: an open-source package for robust stochastic optimization made easy},
author={Chen, Zhi and Xiong, Peng},
journal={INFORMS Journal on Computing},
year={2023},
month={Mar},
day={30},
publisher={INFORMS}
}
```
```
@article{chen2020robust,
title={Robust stochastic optimization made easy with RSOME},
author={Chen, Zhi and Sim, Melvyn and Xiong, Peng},
journal={Management Science},
volume={66},
number={8},
pages={3329--3339},
year={2020},
publisher={INFORMS}
}
```
| /rsome-1.2.1.tar.gz/rsome-1.2.1/README.md | 0.531209 | 0.946597 | README.md | pypi |
from __future__ import annotations
import copy
from operator import attrgetter
from typing import (
Callable,
Dict,
Generator,
Iterable,
List,
Optional,
Set,
Tuple,
Union,
cast,
)
from bs4 import BeautifulSoup
from bs4.element import NavigableString, Tag
from dataclasses import dataclass, field
from rsoup.python.models.context import (
Attribute,
Text,
Linebreak,
ContentHierarchy,
)
@dataclass
class Tree:
    """A lightweight render tree node used for context extraction."""

    id: int
    value: str = ""
    block: bool = False
    tag: Optional[str] = None
    # attrs is empty when it's a block
    attrs: Attribute = field(default_factory=dict)  # type: ignore
    children: List[Tree] = field(default_factory=list)
    # telling if the tree is verified that the structure is correct
    # (inline only contains inline)
    is_verified: bool = False

    def descendants(self) -> Iterable[Tree]:
        """Yield this node then every node below it, depth-first pre-order."""
        yield self
        for child in self.children:
            yield from child.descendants()

    def clone_without_children(self) -> Tree:
        """Copy this node with an empty child list (attrs shallow-copied)."""
        return Tree(
            id=self.id,
            value=self.value,
            block=self.block,
            tag=self.tag,
            attrs=copy.copy(self.attrs),
            children=[],
            is_verified=self.is_verified,
        )

    def fix_tree(self) -> List[Tree]:
        """An inline element can't contain any block element. if it happens,
        we need to split it
        """
        # Leaves need no fixing -- just mark them verified.
        if not self.children:
            leaf = self.clone_without_children()
            leaf.is_verified = True
            return [leaf]

        # Block elements may contain anything; fix children recursively.
        if self.block:
            fixed = self.clone_without_children()
            fixed.children = [
                fixed_child
                for child in self.children
                for fixed_child in child.fix_tree()
            ]
            fixed.is_verified = True
            return [fixed]

        # Inline element: split it around any block descendants so that
        # the result is a flat list of alternating inline/block trees.
        result: List[Tree] = [self.clone_without_children()]
        result[-1].is_verified = True
        for child in self.children:
            fixed_children = child.fix_tree()
            if child.block:
                result += fixed_children
                continue
            for fixed_child in fixed_children:
                if fixed_child.block:
                    result += fixed_child.fix_tree()
                else:
                    if result[-1].block:
                        # Reopen an empty copy of this inline element to
                        # host the inline run that follows a block.
                        result.append(
                            Tree(
                                id=self.id,
                                value="",
                                block=False,
                                tag=self.tag,
                                attrs=self.attrs,
                                children=[],
                                is_verified=True,
                            )
                        )
                    result[-1].children.append(fixed_child)
        return result
PageElement = Union[Tag, NavigableString]
class ContextExtractor:
    """Extracting context that leads to an element in an HTML page.

    Assuming that the page follows tree structure. Each header element
    represents a level (section) in the tree.
    This extractor tries to do its best to detect which text should be kept in the same line
    and which one is not. However, it does not take into account the style of element (display: block)
    and hence has to rely on some heuristics. For example, <canvas> is an inline element, however, it
    is often used as a block element so this extractor puts it on another line.
    """
    # list of inline elements that will be rendered in same line except <br> tags
    # https://developer.mozilla.org/en-US/docs/Web/HTML/Inline_elements
    # fmt: off
    INLINE_ELEMENTS = {
        "a", "abbr", "acronym", "audio", "b",
        "bdi", "bdo", "big", "button", "cite", "canvas",
        "code", "data", "datalist", "del", "dfn", "em",
        "embed", "i", "iframe", "img", "input", "ins",
        "kbd", "label", "map", "mark", "meter",
        "object", "output", "picture", "progress", "q",
        "ruby", "s", "samp", "select", "slot",
        "small", "span", "strong", "sub", "sup", "svg", "template",
        "textarea", "time", "u", "tt", "var", "video", "wbr"
    }
    # elements treated as starting a new rendered line
    BLOCK_ELEMENTS = {
        "body", "br", "address", "article", "aside",
        "blockquote", "details", "dialog", "dd", "div",
        "dl", "dt", "fieldset", "figcaption", "figure",
        "footer", "form", "h1", "h2", "h3", "h4", "h5",
        "h6", "header", "hgroup", "hr", "li", "main",
        "nav", "ol", "p", "pre", "section", "table", "ul"
    }
    # elements whose subtrees are dropped entirely (non-content)
    IGNORE_ELEMENTS = {"script", "style", "noscript"}
    # block elements kept as markers but whose contents are skipped
    SKIP_CONTENT_BLOCK_ELEMENTS = {"table"}
    # elements that terminate "content after" collection (same section level)
    SAME_CONTENT_LEVEL_ELEMENTS = {"table", "h1", "h2", "h3", "h4", "h5", "h6"}
    HEADER_ELEMENTS = {"h1", "h2", "h3", "h4", "h5", "h6"}
    # elements whose attributes are preserved via extract_rich_text_attrs
    RICH_ELEMENTS = {"a", "b", "i", "h1", "h2", "h3", "h4", "h5", "h6"}
    # fmt: on
    def __init__(self, doc: BeautifulSoup):
        # parsed document that elements passed to extract() belong to
        self.doc = doc
    def extract(self, el: PageElement) -> List[ContentHierarchy]:
        """Extract context tree that leads to the given element.

        Returns the chain of header-delimited sections (each with the text
        surrounding ``el``), starting with a synthetic level-0 section for
        content that precedes any header.
        """
        # travel up the tree to find the parents
        tree_before, tree_after = self.locate_content_before_and_after(el)
        if tree_before is None:
            context_before = []
        else:
            tree = self.get_tree(tree_before)
            # fix errors in the tree, so that inline element only contains inline elements
            trees = tree.fix_tree()
            context_before = [
                item for tree in trees for item in self.flatten_tree(tree)
            ]
            context_before = self.optimize_flatten_tree(context_before)
        if tree_after is None:
            context_after = []
        else:
            tree = self.get_tree(tree_after)
            trees = tree.fix_tree()
            context_after = [item for tree in trees for item in self.flatten_tree(tree)]
            context_after = self.optimize_flatten_tree(context_after)
        # split context_before into sections, one per header encountered
        context = [ContentHierarchy(level=0, heading="")]
        i = 0
        while i < len(context_before):
            c = context_before[i]
            if isinstance(c, Linebreak):
                context[-1].content_before.append(c)
                i += 1
                continue
            headers = self.HEADER_ELEMENTS.intersection(c.tags)
            if len(headers) == 0:
                context[-1].content_before.append(c)
                i += 1
                continue
            # a text run can only belong to a single header element
            assert len(headers) == 1
            header = list(headers)[0]
            # header tag names are "h1".."h6", so the level is the digit suffix
            context.append(ContentHierarchy(level=int(header[1:]), heading=c.value))
            i += 1
            while i < len(context_before):
                # now if the next one is not line break and is the same header, we can merge them
                nc = context_before[i]
                if isinstance(nc, Linebreak):
                    break
                assert self.HEADER_ELEMENTS.intersection(nc.tags) == headers
                context[-1].heading += nc.value
                i += 1
        # we do another filter to make sure the content is related to the element:
        # the headers leading to this element must be strictly increasing in level,
        # so walk backwards keeping only sections that open a shallower level
        rev_context = []
        header = 10
        for i in range(len(context) - 1, -1, -1):
            if context[i].level < header:
                rev_context.append(context[i])
                header = context[i].level
        context = list(reversed(rev_context))
        context[-1].content_after = context_after
        return context
    def locate_content_before_and_after(
        self, element: PageElement
    ) -> Tuple[Optional[Tag], Optional[Tag]]:
        """Finding surrounding content of the element.

        Assuming elements in the document is rendered from top to bottom and
        left to right. In other words, there is no CSS that do float right/left
        to make pre/after elements to be appeared out of order.
        Currently, (the logic is not good)
        * to determine the content before the element, we just keep all elements rendered
        before this element (we are doing another filter outside of this function in `self.extract`).
        * to determine the content after the element, we consider only the siblings
        and stop before they hit a block element (not all block elements) that may be in the same level such as table, etc.
        """
        tree_before = None
        tree_after = None
        el = element
        # walk up to (but excluding) <html>, cloning each ancestor and keeping
        # only the siblings that are rendered before the path down to `element`
        while el.parent is not None and el.parent.name != "html":
            parent = self.copy_node_without_children(el.parent)
            for e in el.parent.contents:
                if e is el:
                    # this is the index
                    break
                parent.append(copy.copy(e))
            if tree_before is not None:
                # nest the tree built so far under this (childless) ancestor copy
                parent.append(tree_before)
            tree_before = parent
            el = el.parent
        el = element
        if el.parent is not None:
            # collect the siblings after `element`, stopping at the first element
            # that starts a new content level (header, table, ...)
            for i, e in enumerate(el.parent.contents):
                if e is el:
                    if i < len(el.parent.contents) - 1:
                        tree_after = self.copy_node_without_children(el.parent)
                        for e2 in el.parent.contents[i + 1 :]:
                            if (
                                isinstance(e2, Tag)
                                and e2.name in self.SAME_CONTENT_LEVEL_ELEMENTS
                            ):
                                break
                            tree_after.append(copy.copy(e2))
                    break
        assert tree_before is not None and isinstance(tree_before, Tag)
        return tree_before, tree_after
    def get_tree(
        self,
        el: PageElement,
        id_container: Optional[dict] = None,
    ) -> Tree:
        """Convert element to tree.

        ``id_container`` threads a mutable counter through the recursion so
        that every node of the resulting tree gets a unique depth-first id.
        """
        if id_container is None:
            id_container = {"id": -1}
        if isinstance(el, NavigableString):
            # leaf node: plain text
            id_container["id"] += 1
            return Tree(id=id_container["id"], value=el.get_text())
        id_container["id"] += 1
        tree_id = id_container["id"]
        value = ""
        if el.name in self.SKIP_CONTENT_BLOCK_ELEMENTS:
            # e.g. <table>: keep the element itself but drop its contents
            children = []
        else:
            children = [
                self.get_tree(c, id_container)
                for c in cast(List[PageElement], el.contents)
                if isinstance(c, NavigableString) or c.name not in self.IGNORE_ELEMENTS
            ]
        if len(children) == 1 and children[0].tag is None:
            # one child and it's a string, so we undo it to reduce the tree depth
            value = children[0].value
            children = []
            id_container["id"] -= 1
        if el.name in self.RICH_ELEMENTS:
            attrs = self.extract_rich_text_attrs(el)
        else:
            attrs: Attribute = {}
        return Tree(
            id=tree_id,
            value=value,
            block=el.name in self.BLOCK_ELEMENTS,
            tag=el.name,
            attrs=attrs,
            children=children,
        )
    def flatten_tree(self, tree: Tree) -> List[Union[Text, Linebreak]]:
        """Assuming that the tree is already fixed.

        Flattens the tree into a list of Text runs, inserting a Linebreak
        marker after every block-level child.
        """
        assert tree.is_verified
        output = []
        if tree.block:
            assert tree.tag is not None
            if tree.value != "":
                # a block that directly holds text (collapsed single-string child)
                assert len(tree.attrs) == 0
                output.append(
                    Text(
                        id=str(tree.id),
                        value=tree.value,
                        tags=[tree.tag],
                        id2attrs={},
                    )
                )
            for c in tree.children:
                for sc in self.flatten_tree(c):
                    if isinstance(sc, Text):
                        # record that this text is (transitively) inside tree.tag
                        sc.tags.append(tree.tag)
                    output.append(sc)
                if c.block:
                    output.append(Linebreak())
            # remove the last line break
            if len(output) > 0 and isinstance(output[-1], Linebreak):
                output.pop()
            return output
        # inline element
        if len(tree.children) == 0:
            tags = []
            id2attrs = {}
            if tree.tag is not None:
                tags.append(tree.tag)
                if len(tree.attrs) > 0:
                    id2attrs[str(tree.id)] = tree.attrs
            return [
                Text(id=str(tree.id), value=tree.value, tags=tags, id2attrs=id2attrs)
            ]
        assert tree.tag is not None
        for c in tree.children:
            for subc in self.flatten_tree(c):
                # after fix_tree an inline element can only contain Text runs
                assert isinstance(subc, Text)
                subc = copy.deepcopy(subc)
                subc.tags.append(tree.tag)
                if len(tree.attrs) > 0:
                    subc.id2attrs[str(tree.id)] = tree.attrs
                output.append(subc)
        return output
def optimize_flatten_tree(
self, lst: List[Union[Text, Linebreak]], merge_empty_lines: bool = True
) -> List[Union[Text, Linebreak]]:
"""Merge consecutive elements to make the list more compact"""
if len(lst) == 0:
return lst
if (
merge_empty_lines
and isinstance(lst[0], Text)
and lst[0].value.strip() == ""
):
new_lst: List[Union[Text, Linebreak]] = [
Linebreak(lst[0].value.count("\n"))
]
else:
new_lst: List[Union[Text, Linebreak]] = [lst[0]]
for i in range(1, len(lst)):
last_item = new_lst[-1]
item = lst[i]
if (
merge_empty_lines
and isinstance(item, Text)
and item.value.strip() == ""
):
item = Linebreak(item.value.count("\n"))
if isinstance(item, Linebreak):
if isinstance(last_item, Linebreak):
last_item.n_lines += item.n_lines
else:
new_lst.append(item)
else:
if isinstance(last_item, Linebreak):
new_lst.append(item)
else:
# same text, merge them if they share the same tags and non empty id2attrs
mergable = (
item.tags == last_item.tags and item.id2attrs == last_item.id
)
if mergable:
last_item.value += item.value
else:
new_lst.append(item)
return new_lst
def extract_rich_text_attrs(self, e: Tag) -> Attribute:
if e.name == "a":
return {"href": e.attrs.get("href", "")}
return {}
def copy_node_without_children(self, node: Tag) -> Tag:
contents = node.contents
node.contents = []
newnode = copy.copy(node)
node.contents = contents
return newnode | /python/context_extractor.py | 0.763175 | 0.242519 | context_extractor.py | pypi |
from typing import (
Dict,
List,
Set,
TypedDict,
Union,
)
from dataclasses import asdict, dataclass, field
class Attribute(TypedDict, total=False):
    """Optional rich-text attributes (currently only hyperlink targets)."""
    href: str
@dataclass
class Text:
    """A span of text plus the inline tags (and their attributes) around it."""
    id: str
    value: str = ""
    tags: List[str] = field(default_factory=list)
    id2attrs: Dict[str, Attribute] = field(default_factory=dict)
    def to_dict(self):
        """Serialize to plain built-ins (shallow-copies the attribute map)."""
        return {
            "id": self.id,
            "value": self.value,
            "tags": self.tags,
            "id2attrs": dict(self.id2attrs),
        }
    @staticmethod
    def from_dict(obj: dict):
        """Inverse of :meth:`to_dict`."""
        return Text(obj["id"], obj["value"], obj["tags"], obj["id2attrs"])
    @staticmethod
    def from_tuple(obj: tuple):
        """Build from a 4-tuple ``(id, value, tags, id2attrs)``."""
        id_, value, tags, id2attrs = obj
        return Text(id_, value, tags, id2attrs)
    def __str__(self):
        return self.value
@dataclass
class Linebreak:
    """One or more consecutive line breaks collapsed into a single marker."""
    n_lines: int = 1
    def to_dict(self):
        return {"n_lines": self.n_lines}
    @staticmethod
    def from_dict(obj: dict):
        return Linebreak(obj["n_lines"])
    def __str__(self):
        # rendered as a newline annotated with the collapse count, e.g. "\n{3}"
        return "\n{" + str(self.n_lines) + "}"
@dataclass
class ContentHierarchy:
    """Content at each level that leads to the table"""
    # level of the heading; level 0 marks the start of the document and should not be used
    level: int
    # title of the level (header)
    heading: str
    # partially HTML content, normalized <a>, <b>, <i> tags (breaklines or block
    # text such as div, p are converted to line breaks); other HTML containers
    # such as <table>, <img>, <video>, <audio> are kept as empty tags
    content_before: List[Union[Text, Linebreak]] = field(default_factory=list)
    # only non-empty when this level is not the same level as the table
    content_after: List[Union[Text, Linebreak]] = field(default_factory=list)
    def to_dict(self):
        return {
            "level": self.level,
            "heading": self.heading,
            "content_before": [piece.to_dict() for piece in self.content_before],
            "content_after": [piece.to_dict() for piece in self.content_after],
        }
    @staticmethod
    def from_dict(obj: dict):
        def revive(raw: dict):
            # Text dicts always carry an "id"; Linebreak dicts never do
            if "id" in raw:
                return Text.from_dict(raw)
            return Linebreak.from_dict(raw)
        return ContentHierarchy(
            level=obj["level"],
            heading=obj["heading"],
            content_before=[revive(raw) for raw in obj["content_before"]],
            content_after=[revive(raw) for raw in obj["content_after"]],
        )
    @staticmethod
    def from_tuple(obj: tuple):
        def revive(raw: tuple):
            # Text tuples have four fields; a 1-tuple is a Linebreak count
            if len(raw) > 1:
                return Text.from_tuple(raw)
            return Linebreak(raw[0])
        return ContentHierarchy(
            level=obj[0],
            heading=obj[1],
            content_before=[revive(raw) for raw in obj[2]],
            content_after=[revive(raw) for raw in obj[3]],
        )
import json
import os
from os.path import dirname, join
from typing import Dict
from jinja2 import Template
from notebook.base.handlers import APIHandler
# Query types this handler knows how to template; extend when new templates are added.
SUPPORTED_QUERY_TYPES = ["portal"]
class UnsupportedQueryTypeError(Exception):
    """Raised when a POSTed query 'type' is not in SUPPORTED_QUERY_TYPES."""
    pass
class UnimplementedQueryResolutionError(Exception):
    """Raised when a supported query type has no template-resolution method."""
    pass
class Query_handler(APIHandler):
    """
    RSP templated Query Handler.
    """
    @property
    def rubinquery(self) -> Dict[str, str]:
        # extension settings registered under the "rubinquery" key
        return self.settings["rubinquery"]
    def post(self) -> None:
        """POST receives the query type and the query value as a JSON
        object containing "type" and "value" keys. Each is a string.
        "type" is currently limited to "portal" (generally: it
        must be in SUPPORTED_QUERY_TYPES).
        For a Portal Query, "value" is the URL referring to that query.
        The interpretation of "value" is query-type dependent.
        Generally, the post will load a notebook template from the
        "templates" directory (relative to this handler) whose name is
        <type>_query.ipynb.template.
        It will then use the value to resolve the template, and will write
        a file with the template resolved under the user's
        "$HOME/notebooks/queries" directory. That filename will also be
        derived from the type and value.
        """
        input_str = self.request.body.decode("utf-8")
        input_document = json.loads(input_str)
        q_type = input_document["type"]
        q_value = input_document["value"]
        if q_type not in SUPPORTED_QUERY_TYPES:
            raise UnsupportedQueryTypeError(
                f"{q_type} is not a supported query type"
            )
        q_fn = self._create_query(q_type, q_value)
        self.finish(q_fn)
    def _create_query(self, q_type: str, q_value: str) -> str:
        """Load and resolve the template for q_type; returns a JSON result string."""
        # renamed from ``dir`` so the ``dir`` builtin is not shadowed
        template_dir: str = join(dirname(__file__), "templates")
        fn = join(template_dir, q_type + "_query.ipynb.template")
        with open(fn) as f:
            txt = f.read()
        tmpl = Template(txt)
        if q_type == "portal":
            q_result = self._create_portal_query(q_value, tmpl)
        else:
            raise UnimplementedQueryResolutionError(
                f"{q_type} does not have a method of template resolution"
            )
        return q_result
    def _create_portal_query(self, q_value: str, tmpl: Template) -> str:
        """Render the portal template for q_value, write it under
        $HOME/notebooks/queries, and return a JSON summary string."""
        # The value should be a URL
        url = q_value
        q_id = q_value.split("/")[-1]  # Last component is a unique query ID
        nb = tmpl.render(
            QUERYNAME=q_id,
            QUERYURL=url,
        )
        r_qdir = join("notebooks", "queries")
        # NOTE(review): os.getenv("HOME") may be None in a stripped environment,
        # which would make join() raise — confirm HOME is always set here.
        qdir = join(os.getenv("HOME"), r_qdir)
        os.makedirs(qdir, exist_ok=True)
        fname = f"portal_{q_id}.ipynb"
        r_fpath = join(r_qdir, fname)
        fpath = join(qdir, fname)
        with open(fpath, "wb") as f:
            f.write(bytes(nb, "utf-8"))
        retval = {
            "status": 200,
            "filename": fname,
            "path": r_fpath,
            # NOTE(review): JUPYTERHUB_SERVICE_PREFIX may be unset, making
            # join(None, ...) raise TypeError — confirm or supply a default.
            "url": join(
                os.environ.get("JUPYTERHUB_SERVICE_PREFIX"), "tree", r_fpath
            ),
            "body": nb,
        }
        return json.dumps(retval)
class DisplayIconDefinition:
    """One icon on the RSP-1570 front-panel display and where its state lives.

    ``flag_index`` selects the byte within the 5-byte flags block and
    ``flag`` is the bit mask for this icon within that byte.
    """
    def __init__(self, name, category, friendly_name, icon, flag_index, flag):
        (
            self.name,
            self.category,
            self.friendly_name,
            self.icon,
            self.flag_index,
            self.flag,
        ) = (name, category, friendly_name, icon, flag_index, flag)
# Master table of every icon on the RSP-1570 display, mapping each icon to its
# byte index and bit mask within the 5-byte flags block of a feedback message.
DISPLAY_ICON_DEFINITIONS = [
    DisplayIconDefinition(
        "rsp1570_input_analog", "input_icons", "Analog", "A", 0, 0x01
    ),
    DisplayIconDefinition("rsp1570_input_5", "input_icons", "Input 5", "5", 0, 0x02),
    DisplayIconDefinition("rsp1570_input_4", "input_icons", "Input 4", "4", 0, 0x04),
    DisplayIconDefinition("rsp1570_input_3", "input_icons", "Input 3", "3", 0, 0x08),
    DisplayIconDefinition("rsp1570_input_2", "input_icons", "Input 2", "2", 0, 0x10),
    DisplayIconDefinition("rsp1570_input_1", "input_icons", "Input 1", "1", 0, 0x20),
    DisplayIconDefinition(
        "rsp1570_input_coaxial", "input_icons", "Coaxial", "Coaxial", 0, 0x40
    ),
    DisplayIconDefinition(
        "rsp1570_input_optical", "input_icons", "Optical", "Optical", 0, 0x80
    ),
    DisplayIconDefinition(
        "rsp1570_sound_mode_x", "sound_mode_icons", "x", "x", 1, 0x01
    ),
    DisplayIconDefinition(
        "rsp1570_sound_mode_ii", "sound_mode_icons", "II", "II", 1, 0x02
    ),
    DisplayIconDefinition("rsp1570_input_hdmi", "input_icons", "HDMI", "HDMI", 1, 0x04),
    DisplayIconDefinition(
        "rsp1570_sound_mode_ex", "sound_mode_icons", "EX", "EX", 1, 0x08
    ),
    DisplayIconDefinition(
        "rsp1570_sound_mode_es", "sound_mode_icons", "ES", "ES", 1, 0x10
    ),
    DisplayIconDefinition(
        "rsp1570_sound_mode_dts", "sound_mode_icons", "dts", "dts", 1, 0x20
    ),
    DisplayIconDefinition(
        "rsp1570_sound_mode_pro_logic",
        "sound_mode_icons",
        "Pro Logic",
        "Pro Logic",
        1,
        0x40,
    ),
    DisplayIconDefinition(
        "rsp1570_sound_mode_dolby_digital",
        "sound_mode_icons",
        "Dolby Digital",
        "Dolby Digital",
        1,
        0x80,
    ),
    DisplayIconDefinition(
        "rsp1570_state_display_mode0",
        "state_icons",
        "Display Mode 0",
        "Display Mode0",
        2,
        0x01,
    ),
    DisplayIconDefinition(
        "rsp1570_state_display_mode1",
        "state_icons",
        "Display Mode 1",
        "Display Mode1",
        2,
        0x02,
    ),
    DisplayIconDefinition(
        "rsp1570_state_zone2", "state_icons", "Zone 2", "Zone 2", 2, 0x04
    ),
    DisplayIconDefinition(
        "rsp1570_state_standby_led",
        "state_icons",
        "Standby LED",
        "Standby LED",
        2,
        0x08,
    ),
    DisplayIconDefinition(
        "rsp1570_speaker_center_back", "speaker_icons", "Center Back", "SB", 3, 0x01
    ),
    DisplayIconDefinition(
        "rsp1570_state_zone4", "state_icons", "Zone 4", "Zone 4", 3, 0x02
    ),
    DisplayIconDefinition(
        "rsp1570_state_zone3", "state_icons", "Zone 3", "Zone 3", 3, 0x04
    ),
    DisplayIconDefinition("rsp1570_misc_lt", "misc_icons", "Misc <", "<", 3, 0x08),
    DisplayIconDefinition("rsp1570_misc_gt", "misc_icons", "Misc >", ">", 3, 0x10),
    DisplayIconDefinition(
        "rsp1570_sound_mode_71", "sound_mode_icons", "7.1", "7.1", 3, 0x20
    ),
    DisplayIconDefinition(
        "rsp1570_sound_mode_51", "sound_mode_icons", "5.1", "5.1", 3, 0x40
    ),
    DisplayIconDefinition("rsp1570_state_zone", "state_icons", "Zone", "Zone", 3, 0x80),
    DisplayIconDefinition(
        "rsp1570_speaker_center_back_left",
        "speaker_icons",
        "Center Back Left",
        "CBL",
        4,
        0x01,
    ),
    DisplayIconDefinition(
        "rsp1570_speaker_center_back_right",
        "speaker_icons",
        "Center Back Right",
        "CBR",
        4,
        0x02,
    ),
    DisplayIconDefinition(
        "rsp1570_speaker_subwoofer", "speaker_icons", "Subwoofer", "SW", 4, 0x04
    ),
    DisplayIconDefinition(
        "rsp1570_speaker_surround_right",
        "speaker_icons",
        "Surround Right",
        "SR",
        4,
        0x08,
    ),
    DisplayIconDefinition(
        "rsp1570_speaker_surround_left", "speaker_icons", "Surround Left", "SL", 4, 0x10
    ),
    DisplayIconDefinition(
        "rsp1570_speaker_front_right", "speaker_icons", "Front Right", "FR", 4, 0x20
    ),
    DisplayIconDefinition(
        "rsp1570_speaker_center", "speaker_icons", "Center", "C", 4, 0x40
    ),
    DisplayIconDefinition(
        "rsp1570_speaker_front_left", "speaker_icons", "Front Left", "FL", 4, 0x80
    ),
]
# Fast lookup of a definition by its icon label (icon labels are unique above).
DISPLAY_ICON_DEFINITIONS_BY_ICON = {d.icon: d for d in DISPLAY_ICON_DEFINITIONS}
# DISPLAY_ICON_DEFINITIONS_BY_NAME = {d.name: d for d in DISPLAY_ICON_DEFINITIONS}
def flags_to_icons(flags):
    """Map the 5-byte display flags block to {icon: bool} for every known icon."""
    return {
        defn.icon: bool(flags[defn.flag_index] & defn.flag)
        for defn in DISPLAY_ICON_DEFINITIONS
    }
def icon_list_to_flags(icon_list):
    """Build the 5-byte flags block with the bit for each named icon set."""
    flags = bytearray(5)
    for icon_name in icon_list:
        defn = DISPLAY_ICON_DEFINITIONS_BY_ICON[icon_name]
        flags[defn.flag_index] |= defn.flag
    return flags
def icon_dict_to_flags(icon_dict):
    """As icon_list_to_flags, but taking {icon: bool} and using only lit icons."""
    lit_icons = [name for name, lit in icon_dict.items() if lit]
    return icon_list_to_flags(lit_icons)
def icons_that_are_on(icon_dict):
    """Return the names of the icons whose value is truthy.

    Order follows dict insertion order.
    """
    # TODO: Consistent order
    lit = []
    for name, state in icon_dict.items():
        if state:
            lit.append(name)
    return lit
import io
import logging
_LOGGER = logging.getLogger(__name__)
# Protocol framing bytes: every message begins with START_BYTE; ESCAPE_BYTE
# introduces a two-byte escape sequence for literal 0xFD/0xFE payload values.
START_BYTE = 0xFE
ESCAPE_BYTE = 0xFD
class RotelProtocolError(Exception):
    """Raised when a framed message is malformed (truncation, bad escape, bad checksum)."""
    pass
class RotelEOFError(Exception):
    """Raised when the underlying byte stream is exhausted."""
    pass
class RotelInvalidByteError(Exception):
    """Raised when an escape sequence contains an unknown second byte."""
    pass
class RotelUnexpectedStartByteError(RotelInvalidByteError):
    """
    For now this is treated the same as an invalid byte error.
    However, it might make sense to treat it like the start of a new message,
    in which case it would invalidate the old message but we would then reset
    in read_payload and immediately start capturing content again.
    """
    pass
class StreamProxy:
    """In-memory stand-in for a serial connection.

    Wraps the given bytes in an io.BytesIO and exposes the async ``read``
    interface that the Decoder expects; handy for testing purposes.
    """
    def __init__(self, message):
        self.buf = io.BytesIO(message)
    async def read(self, n):
        chunk = self.buf.read(n)
        return chunk
class ProtocolDecoder:
    """Decodes the Rotel serial framing from an async byte source.

    ``ser`` is anything exposing an async ``read(n)`` method (a serial
    connection, or StreamProxy in tests).
    """
    def __init__(self, ser):
        self.ser = ser
        self.bytes_received = 0  # total raw bytes consumed, used in diagnostics
    async def next_char_without_meta_decoding(self):
        """Read exactly one raw byte; raise RotelEOFError at end of stream."""
        b = await self.ser.read(1)
        if len(b) == 0:
            raise RotelEOFError(
                "Encountered EOF after {} bytes".format(self.bytes_received)
            )
        assert len(b) == 1
        self.bytes_received += 1
        return b[0]
    async def next_char_with_meta_decoding(self):
        """Read one byte of message content, resolving escape sequences.

        Raises RotelUnexpectedStartByteError on an unescaped START_BYTE and
        RotelInvalidByteError on an unknown escape sequence.
        """
        c1 = await self.next_char_without_meta_decoding()
        if c1 == START_BYTE:
            raise RotelUnexpectedStartByteError(
                "Unexpected unescaped start byte encountered within message content."
            )
        elif c1 != ESCAPE_BYTE:
            return c1
        c2 = await self.next_char_without_meta_decoding()
        if c2 == 0x00:
            return ESCAPE_BYTE
        elif c2 == 0x01:
            return START_BYTE
        else:
            raise RotelInvalidByteError(
                "Invalid byte following ESCAPE_BYTE ({:X}).".format(c2)
            )
    async def wait_for_start_byte(self):
        """Discard (and log) stray bytes until a START_BYTE is seen."""
        unexpected_bytes = bytearray()
        while True:
            try:
                c = await self.next_char_without_meta_decoding()
            except RotelEOFError:
                if len(unexpected_bytes) > 0:
                    _LOGGER.warning(
                        "%d unexpected bytes discarded when EOF encountered: %r",
                        len(unexpected_bytes),
                        unexpected_bytes,
                    )
                raise
            if c == START_BYTE:
                break
            unexpected_bytes.append(c)
        if len(unexpected_bytes) > 0:
            _LOGGER.warning(
                "%d unexpected bytes encountered while waiting for START_BYTE: %r",
                len(unexpected_bytes),
                unexpected_bytes,
            )
        _LOGGER.debug(
            "Start byte encountered at byte %d in stream", self.bytes_received
        )
    async def read_payload(self):
        """Read one framed message and return its payload bytes.

        Frame layout after START_BYTE: one length byte, ``length`` payload
        bytes, then a one-byte additive checksum over length + payload.
        Raises RotelProtocolError on truncation, bad escapes, or a checksum
        mismatch.
        """
        await self.wait_for_start_byte()
        content = bytearray()
        try:
            content.append(await self.next_char_with_meta_decoding())
            # content[0] is the declared payload length
            for _ in range(content[0]):
                content.append(await self.next_char_with_meta_decoding())
            content.append(await self.next_char_with_meta_decoding())
        except RotelEOFError as err:
            raise RotelProtocolError(
                "Unexpected EOF encountered. Work in progress discarded: {}".format(
                    content
                )
            ) from err
        except RotelInvalidByteError as err:
            raise RotelProtocolError(
                "Invalid byte encountered while processing message content. Work in progress discarded: {}".format(
                    content
                )
            ) from err
        body = content[0:-1]
        expected_checksum = content[-1]  # simplified: was content[-1:][0]
        actual_checksum = calculate_checksum(body)
        if expected_checksum != actual_checksum:
            raise RotelProtocolError(
                "Invalid checksum.\nBody: {!r}\nLen body: {}, Expected checksum: {:X}, Actual checksum: {:X}".format(
                    body, len(body), expected_checksum, actual_checksum
                )
            )
        _LOGGER.debug("Valid content of length %d received: %r", len(content), content)
        return content[1:-1]
async def decode_protocol_stream(ser):
    """Async generator yielding each valid payload framed in the byte stream ``ser``.

    Protocol errors are logged and the stream is resynchronised on the next
    START_BYTE; EOF ends the generator.
    """
    decoder = ProtocolDecoder(ser)
    while True:
        try:
            payload = await decoder.read_payload()
        except RotelProtocolError as err:
            _LOGGER.error(err)
        except RotelEOFError:
            break
        else:
            yield payload
    _LOGGER.info("Finished reading messages")
def encode_payload(payload):
    """Frame ``payload`` for the wire: length prefix, additive checksum,
    escape sequences, then the leading START_BYTE."""
    content = [len(payload), *payload]
    content.append(calculate_checksum(content))
    escaped = meta_escape(content)
    return bytes([START_BYTE] + escaped)
def calculate_checksum(sequence):
    """Return the protocol checksum: the sum of ``sequence`` truncated to one byte.

    :param sequence: iterable of ints (e.g. a bytearray of message content)
    :return: int in range 0..255
    """
    # builtin sum replaces the manual accumulator loop
    return sum(sequence) & 0xFF
def meta_escape(raw_message):
    """Escape literal ESCAPE_BYTE/START_BYTE values so they cannot be
    mistaken for framing; returns a new list of ints."""
    escaped = []
    for byte in raw_message:
        if byte == START_BYTE:
            escaped.extend([ESCAPE_BYTE, 0x01])
        elif byte == ESCAPE_BYTE:
            escaped.extend([ESCAPE_BYTE, 0x00])
        else:
            escaped.append(byte)
    return escaped
import logging
from rsp1570serial.icons import flags_to_icons, icons_that_are_on
from rsp1570serial.protocol import decode_protocol_stream, encode_payload
_LOGGER = logging.getLogger(__name__)
# Device ID byte that prefixes every RSP-1570 payload.
DEVICE_ID_RSP1570 = 0xA3
# Message-type bytes (the second byte of every payload).
MSGTYPE_PRIMARY_COMMANDS = 0x10
MSGTYPE_MAIN_ZONE_COMMANDS = 0x14
MSGTYPE_RECORD_SOURCE_COMMANDS = 0x15
MSGTYPE_ZONE_2_COMMANDS = 0x16
MSGTYPE_ZONE_3_COMMANDS = 0x17
MSGTYPE_ZONE_4_COMMANDS = 0x18
MSGTYPE_FEEDBACK_STRING = 0x20
MSGTYPE_TRIGGER_STATUS_STRING = 0x21
MSGTYPE_VOLUME_DIRECT_COMMANDS = 0x30
MSGTYPE_ZONE_2_VOLUME_DIRECT_COMMANDS = 0x32
MSGTYPE_ZONE_3_VOLUME_DIRECT_COMMANDS = 0x33
MSGTYPE_ZONE_4_VOLUME_DIRECT_COMMANDS = 0x34
MSGTYPE_TRIGGER_DIRECT_COMMANDS = 0x40
class RotelMessageError(Exception):
    """Raised when a payload cannot be decoded into a known message type."""
    pass
class FeedbackMessage:
    """Decoded front-panel feedback: two 21-character display lines plus icon flags."""
    def __init__(self, line1, line2, flags):
        self.lines = [line1, line2]
        self.flags = flags
        # decoded {icon: bool} map derived from the 5 flag bytes
        self.icons = flags_to_icons(flags)
    def icons_that_are_on(self):
        # names of the display icons currently lit
        return icons_that_are_on(self.icons)
    def log(self, level=logging.INFO):
        """Log both display lines and the lit icons at the given level."""
        _LOGGER.log(level, "Display line 1: '%s'", self.lines[0])
        _LOGGER.log(level, "Display line 2: '%s'", self.lines[1])
        _LOGGER.log(level, "Icons: %r", self.icons_that_are_on())
    def parse_display_lines(self):
        """
        Parse the display lines and return as much
        as we can infer about the state of the amp.
        Note that the maximum length of sources is 8 characters.
        Display line 2 is an informational display with multiple
        purposes so we decode what we can but it's up to the caller
        to decide what to do when one item disappears when another
        appears. It is copied out verbatim in the 'info' field and
        it is probably safest to just display that to the user
        and leave it at that.
        """
        # fields default to None so callers can distinguish "not shown" from a value
        is_on = None
        source_name = None
        volume = None
        mute_on = None
        party_mode_on = None
        info = None
        rec_source = None
        zone2_source = None
        zone2_volume = None
        zone3_source = None
        zone3_volume = None
        zone4_source = None
        zone4_volume = None
        line0 = self.lines[0]
        if len(line0) != 21:
            _LOGGER.error("Display line 1 must be exactly 21 bytes")
        # an all-NUL line means the display (and hence the amp) is off
        if (
            line0
            == "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
        ):
            is_on = False
        else:
            is_on = True
            source_name = line0[:8].rstrip()
            party_mode_on = line0[10:13] == "pty"
            vol_str = line0[14:]
            # NOTE(review): vol_str is 7 chars but the second literal below is
            # narrower — padding spaces may have been lost in transit; confirm
            # against the original source.
            if (vol_str == "MUTE ON") or (vol_str == " "):
                mute_on = True
                volume = None
            elif vol_str[0:3] != "VOL":
                _LOGGER.error("Could not verify VOL string: %s", vol_str)
            else:
                mute_on = False
                volume = int(vol_str[3:])
        line1 = self.lines[1]
        if len(line1) != 21:
            _LOGGER.error("Display line 2 must be exactly 21 bytes")
        if (
            line1
            == "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
        ):
            pass
        else:
            # char 0x19 is the hardware's Pro Logic II glyph
            info = line1.strip().replace("\x19", "II")
            # NOTE(review): the prefix literals below look narrower than the
            # slice widths ([:9] / [:14]), which would make these comparisons
            # always False — likely padding spaces lost in transit; confirm.
            if line1[:9] == " REC ":
                rec_source = line1[9:].rstrip()
            elif line1[:9] == " ZONE2 ":
                zone2_source = line1[9:].rstrip()
            elif line1[:14] == " ZONE2 VOL ":
                zone2_volume = int(line1[14:16])
            elif line1[:9] == " ZONE3 ":
                zone3_source = line1[9:].rstrip()
            elif line1[:14] == " ZONE3 VOL ":
                zone3_volume = int(line1[14:16])
            elif line1[:9] == " ZONE4 ":
                zone4_source = line1[9:].rstrip()
            elif line1[:14] == " ZONE4 VOL ":
                zone4_volume = int(line1[14:16])
        return {
            "is_on": is_on,
            "source_name": source_name,
            "volume": volume,
            "mute_on": mute_on,
            "party_mode_on": party_mode_on,
            "info": info,
            "rec_source": rec_source,
            "zone2_source": zone2_source,
            "zone2_volume": zone2_volume,
            "zone3_source": zone3_source,
            "zone3_volume": zone3_volume,
            "zone4_source": zone4_source,
            "zone4_volume": zone4_volume,
        }
class TriggerMessage:
    """Decoded 12V trigger status message: 5 flag bytes (all, main, zones 2-4)."""
    # Bit masks of the six triggers within one zone's flag byte.
    _TRIGGER_MASKS = (0x01, 0x02, 0x04, 0x08, 0x10, 0x20)
    def __init__(self, flags):
        self.flags = flags
    def log(self, level=logging.INFO):
        _LOGGER.log(level, self.flags_to_list(self.flags))
    @classmethod
    def _flag_to_list(cls, flag):
        """Expand one flag byte into six "on"/"off" strings, trigger 1 first.

        Replaces six duplicated append statements with a loop over the masks.
        """
        return ["on" if flag & mask else "off" for mask in cls._TRIGGER_MASKS]
    @classmethod
    def flags_to_list(cls, flags):
        """Expand the 5 flag bytes into labelled per-zone trigger states."""
        zone_names = ("All", "Main", "Zone 2", "Zone 3", "Zone 4")
        return [[zone_names[i], cls._flag_to_list(flags[i])] for i in range(5)]
class CommandMessage:
    """A decoded command message: the message-type byte plus its key/data bytes."""
    def __init__(self, message_type, key):
        self.message_type, self.key = message_type, key
def feedback_message_handler(message_type, data):
    """Build a FeedbackMessage from a feedback-string payload body."""
    assert message_type == MSGTYPE_FEEDBACK_STRING
    line1 = data[0:21].decode(encoding="ascii")  # the II is char 0x19
    line2 = data[21:42].decode(encoding="ascii")
    icon_flags = data[42:47]
    return FeedbackMessage(line1, line2, icon_flags)
# OFF Data was: bytearray(b'\x00\x00\x00\x00\x00')
# ON Data was: bytearray(b'\x01\x01\x00\x00\x00')
def trigger_message_handler(message_type, data):
    """Build a TriggerMessage from the 5 trigger-status flag bytes."""
    assert message_type == MSGTYPE_TRIGGER_STATUS_STRING
    assert len(data) == 5
    return TriggerMessage(data)
def command_message_handler(message_type, data):
    """Wrap an echoed/forwarded command payload without further decoding."""
    return CommandMessage(message_type, data)
# Dispatch table from message-type byte to handler, built once at import time
# instead of being reconstructed on every decode_message call.
_MESSAGE_HANDLERS = {
    MSGTYPE_FEEDBACK_STRING: feedback_message_handler,
    MSGTYPE_TRIGGER_STATUS_STRING: trigger_message_handler,
    MSGTYPE_PRIMARY_COMMANDS: command_message_handler,
    MSGTYPE_MAIN_ZONE_COMMANDS: command_message_handler,
    MSGTYPE_RECORD_SOURCE_COMMANDS: command_message_handler,
    MSGTYPE_ZONE_2_COMMANDS: command_message_handler,
    MSGTYPE_ZONE_3_COMMANDS: command_message_handler,
    MSGTYPE_ZONE_4_COMMANDS: command_message_handler,
    MSGTYPE_VOLUME_DIRECT_COMMANDS: command_message_handler,
    MSGTYPE_ZONE_2_VOLUME_DIRECT_COMMANDS: command_message_handler,
    MSGTYPE_ZONE_3_VOLUME_DIRECT_COMMANDS: command_message_handler,
    MSGTYPE_ZONE_4_VOLUME_DIRECT_COMMANDS: command_message_handler,
    MSGTYPE_TRIGGER_DIRECT_COMMANDS: command_message_handler,
}
def decode_message(payload):
    """Decode an unframed payload into a message object.

    :param payload: device-id byte, message-type byte, then message data
    :raises RotelMessageError: on a wrong device id or unknown message type
    """
    if payload[0] != DEVICE_ID_RSP1570:
        raise RotelMessageError(
            "Didn't get expected Device ID byte. {} != {}".format(
                payload[0], DEVICE_ID_RSP1570
            )
        )
    message_type = payload[1]
    message_handler = _MESSAGE_HANDLERS.get(message_type)
    if message_handler is None:
        raise RotelMessageError("Unknown message type byte {:X}".format(message_type))
    data = payload[2:]
    return message_handler(message_type, data)
async def decode_message_stream(ser):
    """Async generator: frames read from ``ser`` decoded into message objects."""
    async for payload in decode_protocol_stream(ser):
        yield decode_message(payload)
import re
import requests
import sys
class Pagination:
    """
    For setting page size, number and orderby/ sort fields of listings
    """
    def __init__(
        self,
        page_number: int = 0,
        page_size: int = 10,
        order_by: str = None,
        sort_order: str = "asc",
    ):
        params = {"pageNumber": page_number, "pageSize": page_size}
        if order_by is not None:
            # the API expects "field direction", e.g. "name desc"
            params["orderBy"] = f"{order_by} {sort_order}"
        # query parameters in the form the API expects
        self.data = params
class ClientBase:
    """Base class of common methods for all API clients"""
    def __init__(self, rspace_url, api_key):
        """
        Initializes RSpace client.
        :param rspace_url: base URL of the RSpace server
        :param api_key: RSpace API key of a user can be found on 'My Profile' page
        """
        self.rspace_url = rspace_url
        self.api_key = api_key
def _get_headers(self, content_type="application/json"):
return {"apiKey": self.api_key, "Accept": content_type}
@staticmethod
def _get_numeric_record_id(global_id):
"""
Gets numeric part of a global ID.
:param global_id: global ID (for example, SD123 or FM12)
:return: numeric record id
"""
if re.match(r"[a-zA-Z]{2}\d+$", str(global_id)) is not None:
return int(global_id[2:])
elif re.match(r"\d+$", str(global_id)) is not None:
return int(global_id)
else:
raise ValueError("{} is not a valid global ID".format(global_id))
@staticmethod
def _get_formated_error_message(json_error):
return "error message: {}, errors: {}".format(
json_error.get("message", ""),
", ".join(json_error.get("errors", [])) or "no error list",
)
@staticmethod
def _responseContainsJson(response):
return (
"Content-Type" in response.headers
and "application/json" in response.headers["Content-Type"]
)
@staticmethod
def _handle_response(response):
# Check whether response includes UNAUTHORIZED response code
# print("status: {}, header: {}".format(response.headers, response.status_code))
if response.status_code == 401:
raise ClientBase.AuthenticationError(response.json()["message"])
try:
response.raise_for_status()
if ClientBase._responseContainsJson(response):
return response.json()
elif response.text:
return response.text
else:
return response
except:
if "application/json" in response.headers["Content-Type"]:
error = "Error code: {}, {}".format(
response.status_code,
ClientBase._get_formated_error_message(response.json()),
)
else:
error = "Error code: {}, error message: {}".format(
response.status_code, response.text
)
raise ClientBase.ApiError(error, response_status_code=response.status_code)
def doDelete(self, path, resource_id):
"""
Performs a delete operation for a given resource
"""
numeric_id = self._get_numeric_record_id(resource_id)
return self.retrieve_api_results(
"/{}/{}".format(path, numeric_id),
content_type=None,
request_type="DELETE",
)
    def retrieve_api_results(
        self, endpoint, params=None, content_type="application/json", request_type="GET"
    ):
        """
        Makes the requested API call and returns either an exception or a parsed JSON response as a dictionary.
        Authentication header is automatically added. In most cases, a specialised method can be used instead.
        :endpoint url: API endpoint
        :param request_type: 'GET', 'POST', 'PUT', 'DELETE'
        :param params: arguments to be added to the API request
        :param content_type: content type
        :return: parsed JSON response as a dictionary
        """
        url = endpoint
        if not endpoint.startswith(self._get_api_url()):
            # callers may pass either a full URL or a relative endpoint
            url = self._get_api_url() + endpoint
        headers = self._get_headers(content_type)
        try:
            if request_type == "GET":
                # GET sends ``params`` as the query string
                response = requests.get(url, params=params, headers=headers)
            elif (
                request_type == "PUT"
                or request_type == "POST"
                or request_type == "DELETE"
            ):
                # mutating verbs send ``params`` as a JSON body
                response = requests.request(
                    request_type, url, json=params, headers=headers
                )
            else:
                raise ValueError(
                    "Expected GET / PUT / POST / DELETE request type, received {} instead".format(
                        request_type
                    )
                )
            return self._handle_response(response)
        except requests.exceptions.ConnectionError as e:
            raise ClientBase.ConnectionError(e)
    @staticmethod
    def _get_links(response):
        """
        Finds links part of the response. Most responses contain links section with URLs that might be useful to query
        for further information.
        :param response: response from the API server
        :return: links section of the response
        """
        try:
            return response["_links"]
        except KeyError:
            # normalise the missing-section case into the client's own exception
            raise ClientBase.NoSuchLinkRel("There are no links!")
    def get_link_contents(self, response, link_rel):
        """
        Finds a link with rel attribute equal to link_rel and retrieves its contents.
        :param response: response from the API server
        :param link_rel: rel attribute value to look for
        :return: parsed response from the found URL
        """
        # get_link raises NoSuchLinkRel if the rel is absent
        return self.retrieve_api_results(self.get_link(response, link_rel))
def get_link(self, response, link_rel):
    """
    Finds a link with rel attribute equal to link_rel.

    :param response: response from the API server.
    :param link_rel: rel attribute value to look for
    :return: string URL
    :raises ClientBase.NoSuchLinkRel: if no link with the requested rel exists
    """
    # Fetch the links section once; the previous version called _get_links a
    # second time just to build the error message.
    links = self._get_links(response)
    for link in links:
        if link["rel"] == link_rel:
            return link["link"]
    raise ClientBase.NoSuchLinkRel(
        'Requested link rel "{}", available rel(s): {}'.format(
            link_rel, (", ".join(x["rel"] for x in links))
        )
    )
def download_link_to_file(self, url, filename):
    """
    Downloads a file from the API server.

    :param url: URL of the file to be downloaded
    :param filename: file path to save the file to
    """
    headers = {"apiKey": self.api_key, "Accept": "application/octet-stream"}
    # stream=True makes iter_content actually stream: without it, requests
    # buffers the whole body in memory before the chunk loop runs. The 'with'
    # blocks guarantee both the response and the file handle are closed.
    with requests.get(url, headers=headers, stream=True) as response:
        with open(filename, "wb") as fd:
            for chunk in response.iter_content(chunk_size=128):
                fd.write(chunk)
def link_exists(self, response, link_rel):
    """
    Checks whether there is a link with rel attribute equal to link_rel in the links section of the response.

    :param response: response from the API server
    :param link_rel: rel attribute value to look for
    :return: True, if the link exists
    """
    # Generator short-circuits on the first match instead of building a list.
    return any(link["rel"] == link_rel for link in self._get_links(response))
def serr(self, msg: str):
    """Write *msg*, newline-terminated, to standard error."""
    sys.stderr.write(msg)
    sys.stderr.write("\n")
def _stream(
    self,
    endpoint: str,
    pagination: Pagination = Pagination(),
):
    """
    Yields items, making paginated requests to the server as each page
    is consumed by the calling code.

    Note this method assumes that the name of the collection of items in the
    response matches the endpoint name. For example 'samples' returns a response
    with a dictionary entry 'samples'.

    Parameters
    ----------
    endpoint : str
        An endpoint with a GET request that makes paginated listings
    pagination : Pagination, optional
        The pagination control. The default is Pagination().

    Yields
    ------
    item : A stream of items, depending on the endpoint called
    """
    # NOTE(review): the Pagination() default is evaluated once at definition
    # time and shared by all calls that omit the argument — harmless only if
    # Pagination instances are never mutated; confirm.
    urlStr = f"{self._get_api_url()}/{endpoint}"
    # Build the first page URL with the pagination query parameters attached.
    next_link = requests.Request(url=urlStr, params=pagination.data).prepare().url
    while True:
        if next_link is not None:
            items = self.retrieve_api_results(next_link)
            for item in items[endpoint]:
                yield item
            # Follow the server-provided 'next' link until the last page.
            if self.link_exists(items, "next"):
                next_link = self.get_link(items, "next")
            else:
                break
class ConnectionError(Exception):
    """Raised when the HTTP connection to the server cannot be established."""

    pass

class AuthenticationError(Exception):
    """Raised when the API key is missing or rejected by the server."""

    pass

class NoSuchLinkRel(Exception):
    """Raised when a requested link 'rel' is absent from a response's _links section."""

    pass

class ApiError(Exception):
    """Raised when the API returns an error response; carries the HTTP status code."""

    def __init__(self, error_message, response_status_code=None):
        Exception.__init__(self, error_message)
self.response_status_code = response_status_code | /rspace_client-2.5.0-py3-none-any.whl/rspace_client/client_base.py | 0.514888 | 0.154058 | client_base.py | pypi |
import datetime as dt
from urllib.parse import urlparse
class AbsValidator:
    """Base class for simple value validators.

    Subclasses override validate() and call raise_type_error() on failure.
    """

    def validate(self, item):
        """Accept any value by default; subclasses raise TypeError on bad input."""
        pass

    def raise_type_error(self, value, expected_type: str):
        """Raise a TypeError describing what *value* was expected to be."""
        raise TypeError(f"Expected {value!r} to be {expected_type}")
class Number(AbsValidator):
    """Validates that a value is an int or a float."""

    def validate(self, value):
        if isinstance(value, (int, float)):
            return
        self.raise_type_error(value, "an int or float")
class String(AbsValidator):
    """Validates that a value is a str."""

    def validate(self, value):
        if isinstance(value, str):
            return
        self.raise_type_error(value, "a string")
class Date(AbsValidator):
    """Validates that a value is a datetime.date or datetime.datetime."""

    def validate(self, value):
        # datetime is a subclass of date, so a single tuple check is enough.
        if not isinstance(value, (dt.date, dt.datetime)):
            self.raise_type_error(value, "a datetime or date")
class Time(AbsValidator):
    """Validates that a value is a datetime.datetime or datetime.time."""

    def validate(self, value):
        if not isinstance(value, (dt.datetime, dt.time)):
            # Message fixed: this validator accepts times, not dates
            # (the previous text was copy-pasted from the Date validator).
            self.raise_type_error(value, "a datetime or time")
class URL(AbsValidator):
    """Validates that a value is a string parseable as a URL."""

    def validate(self, item):
        if not isinstance(item, str):
            self.raise_type_error(item, "a URI string")
        try:
            urlparse(item)
        except ValueError as e:
            # Fixed: the f-prefix was missing, so the placeholders were never
            # interpolated; the bare 'except:' also swallowed SystemExit and
            # KeyboardInterrupt — urlparse raises ValueError on bad input.
            raise TypeError(f"{type(item)} {item!r} could not be parsed") from e
class OneOf(AbsValidator):
    """
    Validates that argument is one of a list of items passed into constructor
    """

    def __init__(self, options):
        # Collection of permitted values.
        self.options = options

    def validate(self, value: str):
        if not isinstance(value, str) or value not in self.options:
            # raise_type_error already prefixes "Expected {value!r} to be", so
            # the description must not repeat "to be" (the previous text
            # produced "... to be to be one of [...]").
            self.raise_type_error(value, f"one of [{', '.join(self.options)}]")
class AllOf(AbsValidator):
    """
    Validates that all items in the argument are in the list of items passed into constructor
    """

    def __init__(self, options):
        # Collection of permitted items.
        self.options = options

    def validate(self, chosen):
        # `chosen` must be a list whose every element appears in the allowed options.
        if not isinstance(chosen, list) or not all([c in self.options for c in chosen]):
            raise TypeError(
                f"Expected all chosen items {chosen!r} to be in [{', '.join(self.options)}]"
            ) # -*- coding: utf-8 -*- | /rspace_client-2.5.0-py3-none-any.whl/rspace_client/validators.py | 0.526586 | 0.395076 | validators.py | pypi |
class QuantityUnit:
    """
    Static data from api/v1/units definitions
    """

    # Immutable lookup table of unit definitions, mirroring GET /api/v1/units.
    data = [
        {"id": 1, "label": "items", "category": "dimensionless", "description": ""},
        {"id": 2, "label": "µl", "category": "volume", "description": ""},
        {"id": 3, "label": "ml", "category": "volume", "description": ""},
        {"id": 4, "label": "l", "category": "volume", "description": ""},
        {"id": 5, "label": "µg", "category": "mass", "description": ""},
        {"id": 6, "label": "mg", "category": "mass", "description": ""},
        {"id": 7, "label": "g", "category": "mass", "description": ""},
        {"id": 8, "label": "C", "category": "temperature", "description": ""},
        {"id": 9, "label": "K", "category": "temperature", "description": ""},
        {"id": 10, "label": "F", "category": "temperature", "description": ""},
        {"id": 11, "label": "nmol/l", "category": "molarity", "description": ""},
        {"id": 12, "label": "μmol/l", "category": "molarity", "description": ""},
        {"id": 13, "label": "mmol/l", "category": "molarity", "description": ""},
        {"id": 14, "label": "mol/l", "category": "molarity", "description": ""},
        {"id": 15, "label": "µg/µl", "category": "concentration", "description": ""},
        {"id": 16, "label": "mg/ml", "category": "concentration", "description": ""},
        {"id": 17, "label": "g/l", "category": "concentration", "description": ""},
    ]

    @staticmethod
    def of(label: str) -> dict:
        """
        Parameters
        ----------
        label : str
            String representation of a unit.

        Raises
        ------
        ValueError
            if no unit definition exists for the label.

        Returns
        -------
        dict
            information about the unit definition.
        """
        # Call the helper on the class directly; the previous code pointlessly
        # instantiated QuantityUnit() just to reach a @staticmethod.
        units = QuantityUnit._find_label(label)
        if len(units) == 0:
            raise ValueError(f"{label} not found in unit data")
        return units[0]

    @staticmethod
    def is_supported_unit(label: str) -> bool:
        """Return True if a unit definition exists for the given label."""
        return len(QuantityUnit._find_label(label)) > 0

    @staticmethod
    def _find_label(label):
        # All definitions whose label matches exactly (0 or 1 entries in the table).
        return [x for x in QuantityUnit.data if x["label"] == label]
@staticmethod
def unit_labels() -> list:
    # Labels of every supported unit. (Annotation corrected: the list
    # comprehension returns a list, not a tuple as previously annotated.)
    return [x["label"] for x in QuantityUnit.data] | /rspace_client-2.5.0-py3-none-any.whl/rspace_client/inv/quantity_unit.py | 0.853867 | 0.543893 | quantity_unit.py | pypi |
from typing import Optional, Sequence, Union, List
from urllib.parse import urlparse
import numbers
import datetime as dt
from rspace_client.inv.quantity_unit import QuantityUnit
class TemplateBuilder:
    """
    Define a SampleTemplate prior to POSTing to RSpace.

    A SampleTemplate only requires a name and a default unit to be defined.
    The default unit is supplied as a String from a permitted list in class
    QuantityUnit. E.g. 'ml', 'g'. All field-adding methods return self so that
    calls can be chained; call build() to obtain the POSTable dictionary.
    """

    # Accepted numeric type for Number fields.
    numeric = Union[int, float]

    def __init__(self, name, defaultUnit, description=None):
        """
        :param name: the template name
        :param defaultUnit: label of a supported unit (see QuantityUnit)
        :param description: optional free-text description
        :raises ValueError: if defaultUnit is not a supported unit label
        """
        if not QuantityUnit.is_supported_unit(defaultUnit):
            raise ValueError(
                f"{defaultUnit} must be a label of a supported unit in QuantityUnit"
            )
        self.name = name
        self.fields = []
        self.qu = QuantityUnit.of(defaultUnit)
        if description is not None:
            self.description = description

    def _set_name(self, name: str, f_type: str):
        # Create the minimal field dict shared by all field types.
        # None is rejected explicitly: previously len(None) raised a confusing
        # TypeError even though the message promised to handle None.
        if name is None or len(name) == 0:
            raise ValueError("Name cannot be empty or None")
        return {"name": name, "type": f_type}

    def radio(self, name: str, options: List, selected: str = None):
        """
        Add a Radio field.

        :param name: the field name
        :param options: a list of radio options
        :param selected: optional option to pre-select; ignored when not in options
        :return: this object, for chaining
        """
        f = self._set_name(name, "Radio")
        f["definition"] = {"options": options}
        if selected is not None and len(selected) > 0 and selected in options:
            f["selectedOptions"] = [selected]
        self.fields.append(f)
        return self

    def choice(self, name: str, options: List, selected: List = None):
        """
        Add a Choice (multi-select) field.

        :param name: the field name
        :param options: a list of choice options
        :param selected: optional list of options to pre-select; entries not
            present in 'options' are ignored
        :return: this object, for chaining
        """
        f = self._set_name(name, "Choice")
        f["definition"] = {"options": options}
        if selected is not None and len(selected) > 0:
            selected = [x for x in selected if x in options]
            if len(selected) > 0:
                f["selectedOptions"] = selected
        self.fields.append(f)
        return self

    def string(self, name: str, default: str = None):
        """Add a single-line String field with an optional default value."""
        f = self._set_name(name, "String")
        if default is not None:
            f["content"] = default
        self.fields.append(f)
        return self

    def text(self, name: str, default: str = None):
        """Add a multi-line Text field with an optional default value."""
        f = self._set_name(name, "Text")
        if default is not None:
            f["content"] = default
        self.fields.append(f)
        return self

    def number(self, name: str, default: numeric = None):
        """
        Add a Number field.

        :param name: the field name
        :param default: optional default numeric value
        :raises ValueError: if default is not a number (int or float)
        :return: this object, for chaining
        """
        f = self._set_name(name, "Number")
        if default is not None:
            if not isinstance(default, numbers.Number):
                raise ValueError(f"Numeric field requires number but was '{default}'")
            f["content"] = default
        self.fields.append(f)
        return self

    def date(self, name: str, isodate: Union[dt.date, dt.datetime, str] = None):
        """
        Add a Date field.

        :param name: the field name
        :param isodate: a datetime.datetime, a datetime.date, or an ISO-8601
            date string (e.g. 2022-01-27) used as the default value
        :raises ValueError: if a string value is not an ISO-8601 date
        :return: this object, for chaining
        """
        f = self._set_name(name, "Date")
        defaultDate = None
        # Order matters: datetime is a subclass of date, so test it first.
        if isodate is not None:
            if isinstance(isodate, dt.datetime):
                defaultDate = isodate.date().isoformat()
            elif isinstance(isodate, dt.date):
                defaultDate = isodate.isoformat()
            elif isinstance(isodate, str):
                defaultDate = (
                    dt.datetime.strptime(isodate, "%Y-%m-%d").date().isoformat()
                )
        if defaultDate is not None:
            f["content"] = defaultDate
        self.fields.append(f)
        return self

    def time(self, name: str, isotime: Union[dt.date, dt.time, str] = None):
        """
        Add a Time field.

        :param name: the field name
        :param isotime: a datetime.datetime, a datetime.time, or an ISO-8601
            time string (e.g. 12:05:36) used as the default value
        :raises ValueError: if a string value is not an ISO-8601 time
        :return: this object, for chaining
        """
        f = self._set_name(name, "Time")
        defaultTime = None
        if isotime is not None:
            # Order matters: a datetime carries a time and must be tested first.
            if isinstance(isotime, dt.datetime):
                defaultTime = isotime.time().isoformat()
            elif isinstance(isotime, dt.time):
                defaultTime = isotime.isoformat()
            elif isinstance(isotime, str):
                defaultTime = dt.time.fromisoformat(isotime).isoformat()
        if defaultTime is not None:
            f["content"] = defaultTime
        self.fields.append(f)
        return self

    def attachment(self, name: str, desc: str = None):
        """
        Add an Attachment field.

        :param name: the field name
        :param desc: optional default description of the file to upload;
            blank or whitespace-only strings are ignored
        :return: this object, for chaining
        """
        f = self._set_name(name, "Attachment")
        if desc is not None and len(desc) > 0 and len(str.strip(desc)) > 0:
            f["content"] = desc
        self.fields.append(f)
        return self

    def uri(self, name: str, uri: str = None):
        """
        Add a Uri field.

        :param name: the field name
        :param uri: optional default URI
        :raises ValueError: if the value cannot be parsed as a URI
        :return: this object, for chaining
        """
        f = self._set_name(name, "Uri")
        if uri is not None and len(uri) > 0 and len(str.strip(uri)) > 0:
            # Parse purely for validation (urlparse raises ValueError on some
            # malformed input); the parse result itself is not needed, so the
            # previously unused 'parsed_uri' local has been dropped.
            urlparse(uri)
            f["content"] = uri
        self.fields.append(f)
        return self

    def field_count(self):
        """Return the number of fields added so far."""
        return len(self.fields)

    def build(self) -> dict:
        """Assemble and return the dictionary to POST as a SampleTemplate."""
        d = {"name": self.name, "defaultUnitId": self.qu["id"], "fields": self.fields}
        if hasattr(self, "description"):
            d["description"] = self.description
        return d
def _fields(self):
    # Internal accessor for the accumulated field definitions.
    return self.fields | /rspace_client-2.5.0-py3-none-any.whl/rspace_client/inv/template_builder.py | 0.88029 | 0.345547 | template_builder.py | pypi |
import datetime
import time
import os
import requests
import rspace_client.eln.filetree_importer as importer
from rspace_client.eln.dcs import DocumentCreationStrategy
from rspace_client.client_base import ClientBase, Pagination
class ELNClient(ClientBase):
    """Client for RSpace API v1.

    Most methods return a dictionary with fields described in the API documentation. The documentation can be found at
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    For authentication, an API key must be provided. It can be found by logging in and navigating to 'My Profile' page.
    """

    # REST API version segment used when building endpoint URLs.
    API_VERSION = "v1"
def _get_api_url(self):
    """
    Returns an API server URL.

    :return: string URL
    """
    return f"{self.rspace_url}/api/{self.API_VERSION}"
# Documents methods
def get_documents(
    self, query=None, order_by="lastModified desc", page_number=0, page_size=20
):
    """
    The Documents endpoint returns a paginated list of summary information about Documents in the RSpace workspace.
    These can be individual documents or notebook entries. More information on
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :param query: (optional) global search for a term, works identically to the simple "All' search in RSpace
    Workspace.
    :param order_by: Sort order for documents.
    :param page_number: For paginated results, this is the number of the page requested, 0 based.
    :param page_size: The maximum number of items to retrieve.
    :return: parsed response as a dictionary
    """
    search_params = {
        "orderBy": order_by,
        "pageSize": page_size,
        "pageNumber": page_number,
    }
    if query is not None:
        search_params["query"] = query
    return self.retrieve_api_results("/documents", search_params)
def stream_documents(self, pagination: Pagination = Pagination()):
    # Generator over all documents, transparently following server pagination.
    # NOTE(review): the Pagination() default is created once at import time and
    # shared between calls — harmless only if Pagination is never mutated; confirm.
    return self._stream("documents", pagination)
def get_documents_advanced_query(
    self, advanced_query, order_by="lastModified desc", page_number=0, page_size=20
):
    """
    The Documents endpoint returns a paginated list of summary information about Documents in the RSpace workspace.
    These can be individual documents or notebook entries. More information on
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :param advanced_query: JSON representation of a search query. This can be built using AdvancedQueryBuilder.
    :param order_by: Sort order for documents.
    :param page_number: For paginated results, this is the number of the page requested, 0 based.
    :param page_size: The maximum number of items to retrieve.
    :return: parsed response as a dictionary
    """
    search_params = {
        "advancedQuery": advanced_query,
        "orderBy": order_by,
        "pageSize": page_size,
        "pageNumber": page_number,
    }
    return self.retrieve_api_results("/documents", search_params)
def get_document(self, doc_id):
    """
    Gets information about a document. More information on
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :param doc_id: numeric document ID or global ID
    :return: a dictionary that includes: document metadata, field content, metadata about media items belonging to
    this document, links to download the content of media files
    """
    record_id = self._get_numeric_record_id(doc_id)
    return self.retrieve_api_results(f"/documents/{record_id}")
def delete_document(self, doc_id):
    """
    Marks document as deleted.

    :param doc_id: numeric document ID or global ID
    """
    # doDelete builds the URL as "/{path}/{id}", so the path segment must not
    # carry a leading slash (cf. unshareItem, which passes "share"); the
    # previous "/documents" produced a double slash in the request URL.
    return self.doDelete("documents", doc_id)
def get_document_csv(self, doc_id):
    """
    Gets information about a document. More information on
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :param doc_id: numeric document ID or global ID
    :return: CSV that includes: document metadata, field content, metadata about media items belonging to
    this document, links to download the content of media files
    """
    record_id = self._get_numeric_record_id(doc_id)
    return self.retrieve_api_results(
        f"/documents/{record_id}", content_type="text/csv"
    )
def create_document(
    self, name=None, parent_folder_id=None, tags=None, form_id=None, fields=None
):
    """
    Creates a new document in user's Api Inbox folder. More information on
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :param name: name of the document (can be omitted)
    :param tags: list of tags (['tag1', 'tag2']) or comma separated string of tags ('tag1,tag2'); optional
    :param form_id: numeric document ID or global ID' optional; defaults to BasicDocument
    :param parent_folder_id: ID of workspace folder or subfolder; optional; defaults to ApiInbox folder
    :param fields: list of fields (dictionaries of (optionally) ids and contents). For example,
    [{'content': 'some example text'}] or [{'id': 123, 'content': 'some example text'}].
    :return: parsed response as a dictionary
    """
    # Only explicitly-supplied values are included in the POST body; the
    # server fills in its own defaults for everything omitted.
    post_body = {}
    if name is not None:
        post_body["name"] = name
    if parent_folder_id is not None:
        post_body["parentFolderId"] = self._get_numeric_record_id(parent_folder_id)
    if tags is not None:
        post_body["tags"] = ",".join(tags) if isinstance(tags, list) else tags
    if form_id is not None:
        post_body["form"] = {"id": int(self._get_numeric_record_id(form_id))}
    if fields:
        post_body["fields"] = fields
    return self.retrieve_api_results("/documents", request_type="POST", params=post_body)
def prepend_content(self, document_id, html_content, field_index=0):
    """
    Prepends content to the beginning of a field. If the field_id is omitted,
    this will prepend content to the first field.
    If field_id is set, this must be a field_id that belongs to the document.

    Parameters
    ----------
    document_id : Integer
        The id of the document that is being modified.
    html_content : String
        HTML snippet.
    field_index : Integer, optional, default = 0
        Index of the field (0-based)

    Returns
    -------
    The updated document
    """
    # Delegate to the shared helper; append=False means insert before existing content.
    return self._add_content(document_id, html_content, field_index, False)
def append_content(self, document_id, html_content, field_index=0):
    """
    Appends content to the end of a field. If the field_id is omitted,
    this will append content to the end of the first field.
    If field_id is set, this must be a field_id that belongs to the document.

    Parameters
    ----------
    document_id : Integer
        The id of the document that is being modified.
    html_content : String
        HTML snippet.
    field_index : Integer, optional - default = 0
        Index of the field (0-based)

    Returns
    -------
    The updated document
    """
    # Delegate to the shared helper; append=True means insert after existing content.
    return self._add_content(document_id, html_content, field_index, True)
def _add_content(self, document_id, html_content, field_index=0, append=True):
    """
    Shared implementation for append_content/prepend_content: fetches the
    document, splices html_content into the chosen field's content, and
    PUTs the modified field back.

    :param document_id: numeric document ID or global ID
    :param html_content: HTML snippet to add
    :param field_index: 0-based index of the field to modify
    :param append: if True add after existing content, else before
    :return: the updated document
    :raises ValueError: if inputs are missing or field_index is out of range
    """
    if document_id is None:
        raise ValueError("No document ID was set")
    if html_content is None:
        raise ValueError("No HTML content was set")
    doc = self.get_document(document_id)
    fields = doc["fields"]
    # Validate the index in all cases: previously a negative index silently
    # fell back to field 0 and index 0 itself was never range-checked.
    if field_index < 0 or field_index >= len(fields):
        raise ValueError(
            "Field at index {} doesn't exist, document {} has {} fields".format(
                field_index, document_id, len(fields)
            )
        )
    field = fields[field_index]
    if append:
        new_content = field["content"] + html_content
    else:
        new_content = html_content + field["content"]
    to_update = [{"id": field["id"], "content": new_content}]
    return self.update_document(
        document_id, form_id=doc["form"]["id"], fields=to_update
    )
def update_document(
    self, document_id, name=None, tags=None, form_id=None, fields=None
):
    """
    Updates a document with a given document id. More information on
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :param document_id: numeric document ID or global ID
    :param name: name of the document (can be omitted)
    :param tags: list of tags (['tag1', 'tag2']) or comma separated string of tags ('tag1,tag2') (can be omitted)
    :param form_id: numeric document ID or global ID (should be left None or otherwise should match the form id
    of the document)
    :param fields: list of fields (dictionaries of (optionally) ids and contents). For example,
    [{'content': 'some example text'}] or [{'id': 123, 'content': 'some example text'}]. (can be omitted)
    :return: parsed response as a dictionary
    """
    # Only explicitly-supplied values are sent; omitted ones stay unchanged.
    payload = {}
    if name is not None:
        payload["name"] = name
    if tags is not None:
        payload["tags"] = ",".join(tags) if isinstance(tags, list) else tags
    if form_id is not None:
        payload["form"] = {"id": int(self._get_numeric_record_id(form_id))}
    if fields:
        payload["fields"] = fields
    doc_numeric_id = self._get_numeric_record_id(document_id)
    return self.retrieve_api_results(
        f"/documents/{doc_numeric_id}", request_type="PUT", params=payload
    )
# Sharing methods
def shareDocuments(
    self, itemsToShare, groupId, sharedFolderId=None, permission="READ"
):
    """
    Shares 1 or more notebooks or documents with 1 group. You can optionally
    specify the id of a folder to share into. If not set will share into the
    top level of the group shared folder.

    :param itemsToShare: A list of document/notebook IDs to share
    :param groupId: The ID of a group to share with
    :param sharedFolderId: The ID of a subfolder of the group's shared folder.
    :param permission: The permission to use, default is "READ", or "EDIT"
    """
    if len(itemsToShare) == 0:
        raise ValueError("Must be at least 1 item to share")
    group_share = {"id": groupId, "permission": permission}
    if sharedFolderId is not None:
        group_share["sharedFolderId"] = sharedFolderId
    share_post = {"itemsToShare": itemsToShare, "groups": [group_share]}
    return self.retrieve_api_results(
        "/share", request_type="POST", params=share_post
    )
def unshareItem(self, sharingId):
    # Removes an existing share. NOTE(review): sharingId appears to be the id
    # of the share record (as listed by get_shared_items), not a document id —
    # confirm against the API documentation.
    return self.doDelete("share", sharingId)
def get_shared_items(
    self, query=None, order_by="name asc", page_number=0, page_size=20
):
    """
    Paginated listing of shared items; default ordering is by document/notebook name.

    :param page_number: For paginated results, this is the number of the page requested, 0 based.
    :param page_size: The maximum number of items to retrieve.
    :param order_by: Sort order for sharedItems - either 'name' or 'sharee' - the name of user
    or group item is shared with.
    """
    listing_params = {
        "orderBy": order_by,
        "pageSize": page_size,
        "pageNumber": page_number,
    }
    if query is not None:
        listing_params["query"] = query
    return self.retrieve_api_results("/share", listing_params)
# File methods
def get_files(
    self,
    page_number=0,
    page_size=20,
    order_by="lastModified desc",
    media_type="image",
):
    """
    Lists media items - i.e. content shown in the Gallery in RSpace web application. Note that this does not include
    files linked from external file systems or 3rd party providers. More information on
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :param page_number: For paginated results, this is the number of the page requested, 0 based.
    :param page_size: The maximum number of items to retrieve.
    :param order_by: Sort order for documents.
    :param media_type: can be 'image', 'av' (audio or video), 'document' (any other file)
    :return: parsed response as a dictionary
    """
    gallery_params = dict(
        pageNumber=page_number,
        pageSize=page_size,
        orderBy=order_by,
        mediaType=media_type,
    )
    return self.retrieve_api_results("/files", gallery_params)
def get_file_info(self, file_id):
    """
    Gets metadata of a single file by its id. More information on
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :param file_id: numeric document ID or global ID
    :return: parsed response as a dictionary
    """
    numeric_id = self._get_numeric_record_id(file_id)
    return self.retrieve_api_results(f"/files/{numeric_id}")
def download_file(self, file_id, filename):
    """
    Downloads file contents. More information on
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :param file_id: numeric document ID or global ID
    :param filename: file path to save the file to
    """
    numeric_id = self._get_numeric_record_id(file_id)
    download_url = "{}/files/{}/file".format(self._get_api_url(), numeric_id)
    return self.download_link_to_file(download_url, filename)
def upload_file(self, file, folder_id=None, caption=None):
    """
    Upload a file to the gallery. More information on
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :param file: open file object
    :param folder_id: folder id of the destination folder
    :param caption: optional caption
    :return: parsed response as a dictionary
    """
    data = {}
    if folder_id is not None:
        numeric_folder_id = self._get_numeric_record_id(folder_id)
        data["folderId"] = numeric_folder_id
    if caption is not None:
        data["caption"] = caption
    # Multipart POST: the file itself goes in 'files', metadata in the form body.
    response = requests.post(
        self._get_api_url() + "/files",
        files={"file": file},
        data=data,
        headers=self._get_headers(),
    )
    return self._handle_response(response)
def update_file(self, file, fileId):
    """
    Replaces the content of an existing gallery file with a new version.
    (Docstring corrected: this updates a file, it does not create one.)
    More information on
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :param file: open file object
    :param fileId: Id of the file to replace
    :return: updated File response as a dictionary
    """
    # Multipart POST to /files/{id}/file uploads the replacement content.
    response = requests.post(
        self._get_api_url() + "/files/{}/file".format(fileId),
        files={"file": file},
        headers=self._get_headers(),
    )
    return self._handle_response(response)
# Activity methods
def get_activity(
    self,
    page_number=0,
    page_size=100,
    order_by=None,
    date_from=None,
    date_to=None,
    actions=None,
    domains=None,
    global_id=None,
    users=None,
):
    """
    Returns all activity for a particular document. More information on
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :param page_number: for paginated results, this is the number of the page requested, 0 based.
    :param page_size: the maximum number of items to retrieve.
    :param order_by: sort order for activities.
    :param date_from: yyyy-mm-dd string or datetime.date object. The earliest date to retrieve activity from.
    :param date_to: yyyy-mm-dd string or datetime.date object. The latest date to retrieve activity from.
    :param actions: a comma separated string or list of strings. Actions to restrict the query.
    :param domains: a comma separated string or list of strings. Domains to restrict the query.
    :param global_id: the global ID of a resource, e.g. SD12345
    :param users: a comma separated string or list of strings. Users to restrict the query.
    :return: parsed response as a dictionary
    """

    def _as_csv(value, param_name):
        # The docstring has always promised "comma separated string or list of
        # strings", but the previous code rejected plain strings with a
        # TypeError; strings now pass through unchanged.
        if isinstance(value, str):
            return value
        if isinstance(value, list):
            return ",".join(value)
        raise TypeError("Unexpected {} type {}".format(param_name, type(value)))

    def _as_iso_date(value, param_name):
        # Accept a date/datetime or a preformatted yyyy-mm-dd string
        # (previously only date objects were accepted, contrary to the docs).
        if isinstance(value, datetime.date):
            return value.isoformat()
        if isinstance(value, str):
            return value
        raise TypeError("Unexpected {} type {}".format(param_name, type(value)))

    params = {"pageNumber": page_number, "pageSize": page_size}
    if order_by is not None:
        params["orderBy"] = order_by
    if date_from is not None:
        params["dateFrom"] = _as_iso_date(date_from, "date_from")
    if date_to is not None:
        # The previous error message here said 'date_from' (copy-paste slip).
        params["dateTo"] = _as_iso_date(date_to, "date_to")
    if actions is not None:
        params["actions"] = _as_csv(actions, "actions")
    if domains is not None:
        params["domains"] = _as_csv(domains, "domains")
    if global_id is not None:
        params["oid"] = str(global_id)
    if users is not None:
        params["users"] = _as_csv(users, "users")
    return self.retrieve_api_results("/activity", params=params)
# Export selection
def start_export_selection(
    self, export_format, item_ids=[], include_revisions=False
):
    """
    Starts an asynchronous export of a selection of items.

    :param export_format: 'xml' or 'html'
    :param item_ids: one or more IDs of documents, notebooks, folders\
    or attachments
    :param include_revisions: a Boolean as to whether items' previous\
    versions should be included in the export. Default is False
    :return: job id
    """
    self._check_export_format(export_format)
    # Join IDs with commas: the previous "".join(item_ids) concatenated the
    # IDs with no separator at all and raised TypeError for integer IDs.
    itemsToExport = ",".join(str(item_id) for item_id in item_ids)
    request_url = (
        self._get_api_url()
        + f"/export/{export_format}/selection?selections={itemsToExport}&includeRevisionHistory={include_revisions}"
    )
    return self.retrieve_api_results(request_url, request_type="POST")
# Export
def start_export(self, export_format, scope, uid=None, include_revisions=False):
    """
    Starts an asynchronous export of user's or group's records.

    :param export_format: 'xml' or 'html'
    :param scope: 'user' or 'group'
    :param uid: id of a user or a group depending on the scope (current user or group will be used if not provided)
    :param include_revisions: a Boolean as to whether items' previous\
    versions should be included in the export, default is False
    :return: job id
    """
    self._check_export_format(export_format)
    if scope not in ("user", "group"):
        raise ValueError(
            f"scope must be either 'user' or 'group', got '{scope}' instead"
        )
    # The uid path segment is optional; the server falls back to the caller's
    # own user/group when it is absent.
    path = f"/export/{export_format}/{scope}"
    if uid is not None:
        path += f"/{uid}"
    request_url = (
        self._get_api_url() + path + f"?includeRevisionHistory={include_revisions}"
    )
    return self.retrieve_api_results(request_url, request_type="POST")
def _check_export_format(self, export_format):
if export_format != "xml" and export_format != "html":
raise ValueError(
f" format must be either 'xml' or 'html', got '{export_format}' instead"
)
def download_export_selection(
    self,
    export_format,
    file_path,
    item_ids=[],
    include_revision_history=False,
    wait_between_requests=30,
):
    """
    Exports record selection and downloads the exported archive to a specified location.

    :param export_format: 'xml' or 'html'
    :param file_path: can be either a directory or a new file in an existing directory
    :param item_ids: IDs of the documents, notebooks, folders or attachments to export
    :param include_revision_history: whether to include all revisions
    :param wait_between_requests: seconds to wait between job status requests (30 seconds default)
    :return: file path to the downloaded export archive
    """
    # Kick off the asynchronous export, then poll until complete and download.
    job_id = self.start_export_selection(
        export_format, item_ids, include_revision_history
    )["id"]
    return self._wait_till_complete_then_download(
        job_id, file_path, wait_between_requests
    )
def _wait_till_complete_then_download(
    self, job_id, file_path, wait_between_requests=30, progress_log=None
):
    """
    Polls an export job until it finishes, then downloads the resulting archive.

    :param job_id: id of the export job to poll
    :param file_path: either a directory (archive keeps its server name) or a file path
    :param wait_between_requests: seconds to sleep between status polls
    :param progress_log: optional path of a writable file to log progress to
    :return: path of the downloaded archive
    :raises ClientBase.ApiError: if the job fails, is abandoned, or reports an unknown status
    """
    while True:
        status_response = self.get_job_status(job_id)
        status = status_response["status"]
        if status == "COMPLETED":
            download_url = self.get_link(status_response, "enclosure")
            self._log_progress(
                progress_log, f"COMPLETED: download url is {download_url}"
            )
            if os.path.isdir(file_path):
                file_path = os.path.join(file_path, download_url.split("/")[-1])
            self.download_link_to_file(download_url, file_path)
            return file_path
        elif status == "FAILED":
            # Fixed: the concatenation was previously split into two
            # statements, so 'msg' lost the error details and the orphaned
            # line raised "bad operand type for unary +" at runtime.
            msg = "Export job failed: " + self._get_formated_error_message(
                status_response["result"]
            )
            self._log_progress(progress_log, msg)
            raise ClientBase.ApiError(msg)
        elif status == "ABANDONED":
            raise ClientBase.ApiError(
                "Export job was abandoned: "
                + self._get_formated_error_message(status_response["result"])
            )
        elif status in ("RUNNING", "STARTING", "STARTED"):
            self._log_progress(
                progress_log,
                f"Running - {status_response['percentComplete']:2.2f}% complete",
            )
            time.sleep(wait_between_requests)
        else:
            raise ClientBase.ApiError("Unknown job status: " + status)
def _log_progress(self, progress_log, msg: str):
if progress_log is not None:
with open(progress_log, "a") as log:
log.write(msg)
if not msg.endswith("\n"):
log.write("\n")
def export_and_download (self, export_format,scope,file_path, uid=None,
    include_revisions=False,
    wait_between_requests=30,
    progress_log=None):
    """
    Exports user's or group's records and downloads the exported archive to a specified location.
    :param export_format: 'xml' or 'html'
    :param scope: 'user' or 'group'
    :param file_path: can be either a directory or a new file in an existing directory
    :param uid: id of a user or a group depending on the scope (current user or group will be used if not provided)
    :param include_revisions: whether to include all revisions
    :param wait_between_requests: seconds to wait between job status requests (30 seconds default)
    :param progress_log: an optional file-path to a writable log file, to log progress
    :return: file path to the downloaded export archive.
    """
    return self.download_export(export_format,scope,file_path,uid, include_revisions,wait_between_requests, progress_log)
def download_export(
    self,
    export_format,
    scope,
    file_path,
    uid=None,
    include_revisions=False,
    wait_between_requests=30,
    progress_log=None
):
    """
    DEPRECATED since 2.5.0. Use 'export_and_download' which better describes this method and works in exactly the same way.
    Exports user's or group's records and downloads the exported archive to a specified location.
    :param export_format: 'xml' or 'html'
    :param scope: 'user' or 'group'
    :param file_path: can be either a directory or a new file in an existing directory
    :param uid: id of a user or a group depending on the scope (current user or group will be used if not provided)
    :param include_revisions: whether to include all revisions
    :param wait_between_requests: seconds to wait between job status requests (30 seconds default)
    :param progress_log: an optional file-path to a writable log file, to log progress
    :return: file path to the downloaded export archive.
    """
    self._log_progress(progress_log, f"{datetime.datetime.now()} - Starting export..")
    job_id = self.start_export(
        export_format=export_format,
        scope=scope,
        uid=uid,
        include_revisions=include_revisions,
    )["id"]
    return self._wait_till_complete_then_download(
        job_id, file_path, wait_between_requests, progress_log
    )
def get_job_status(self, job_id):
    """
    Fetch the current status of a background job.

    :param job_id: job id
    :return: parsed response as a dictionary; the most important field is
        'status', one of 'STARTED', 'STARTING', 'RUNNING', 'COMPLETED',
        'FAILED', 'ABANDONED'
    """
    return self.retrieve_api_results(f"/jobs/{job_id}")
# Form related methods
def get_forms(
    self, query=None, order_by="lastModified desc", page_number=0, page_size=20
):
    """
    Provides a paginated list of Forms, e.g. to retrieve the IDs of the forms
    from which documents are created. More information on
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :param query: (optional) whole or part of a Form's name or tag
    :param order_by: sort order for Forms
    :param page_number: 0-based page index for paginated results
    :param page_size: maximum number of items to retrieve
    :return: parsed response as a dictionary
    """
    criteria = dict(orderBy=order_by, pageSize=page_size, pageNumber=page_number)
    if query is not None:
        criteria["query"] = query
    return self.retrieve_api_results("/forms", criteria)
def create_form(self, name, tags=None, fields=None):
    """
    Create a new Form, supplying the field definitions. More information on
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :param name: name of the form
    :param tags: list of tags (['tag1', 'tag2']) or comma separated string of tags ('tag1,tag2') (optional)
    :param fields: list of fields (dictionaries of 'name', 'type' and optionally other parameters). Currently,
    supported types of Form fields are: 'String', 'Text', 'Number', 'Radio', 'Date'. More information can be found
    on /public/apiDocs.
    :return: parsed response as a dictionary
    :raises ValueError: if name is missing/empty or no fields are supplied
    """
    if name is None or len(name) == 0:
        raise ValueError("Name is a required argument")
    payload = {"name": name}
    if tags is not None:
        # Accept either a list of tags or a pre-joined comma-separated string.
        payload["tags"] = ",".join(tags) if isinstance(tags, list) else tags
    if fields is not None and len(fields) > 0:
        payload["fields"] = fields
    else:
        raise ValueError("There has to be at least one field")
    return self.retrieve_api_results("/forms", request_type="POST", params=payload)
def get_form(self, form_id):
    """
    Gets information about a Form. More information on
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :param form_id: numeric form ID or global ID
    :return: a dictionary that includes: form metadata, fields
    """
    record_id = self._get_numeric_record_id(form_id)
    return self.retrieve_api_results(f"/forms/{record_id}")
def delete_form(self, form_id):
    """
    Deletes form by its ID, if it is in 'NEW' state or has not been deleted.

    :param form_id: numeric Form ID or global ID
    :return: whatever the shared doDelete helper returns for this resource
    """
    return self.doDelete("forms", form_id)
def publish_form(self, form_id):
    """
    A newly created form is not available to create documents from until it has been published. More information on
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :param form_id: numeric form ID or global ID
    :return: a dictionary that includes: form metadata, fields
    """
    record_id = self._get_numeric_record_id(form_id)
    return self.retrieve_api_results(f"/forms/{record_id}/publish", request_type="PUT")
def unpublish_form(self, form_id):
    """
    Unpublishing a form hides it from being available to create documents. More information on
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :param form_id: numeric form ID or global ID
    :return: a dictionary that includes: form metadata, fields
    """
    record_id = self._get_numeric_record_id(form_id)
    return self.retrieve_api_results(f"/forms/{record_id}/unpublish", request_type="PUT")
def share_form(self, form_id):
    """
    Shares this form with your groups. More information on
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :param form_id: numeric form ID or global ID
    :return: a dictionary that includes: form metadata, fields
    """
    record_id = self._get_numeric_record_id(form_id)
    return self.retrieve_api_results(f"/forms/{record_id}/share", request_type="PUT")
def unshare_form(self, form_id):
    """
    Unshares this form with your groups. Only the owner of the Form (its creator) will be able to read or modify
    this Form after this action is performed. More information on
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :param form_id: numeric form ID or global ID
    :return: a dictionary that includes: form metadata, fields
    """
    record_id = self._get_numeric_record_id(form_id)
    return self.retrieve_api_results(f"/forms/{record_id}/unshare", request_type="PUT")
# Folder / notebook methods
def create_folder(self, name, parent_folder_id=None, notebook=False):
    """
    Creates containers to hold RSpace documents and notebook entries. You can create folders in your Workspace and
    Gallery folders, and notebooks in your Workspace. More information on
    https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :param name: name of the folder or notebook
    :param parent_folder_id: numeric folder ID or global ID
    :param notebook: True to create a notebook, False to create a folder
    :return: metadata about the created notebook or folder
    :raises ValueError: if name is missing or empty
    """
    payload = {"notebook": notebook}
    if name is None or len(name) == 0:
        raise ValueError("Name is a required argument")
    payload["name"] = name
    if parent_folder_id is not None:
        payload["parentFolderId"] = self._get_numeric_record_id(parent_folder_id)
    return self.retrieve_api_results("/folders", request_type="POST", params=payload)
def delete_folder(self, folder_id):
    """
    Deletes a folder or notebook by its ID.

    :param folder_id: numeric Folder/Notebook ID or global ID
    :return: whatever the shared doDelete helper returns for this resource
    """
    return self.doDelete("folders", folder_id)
def get_folder(self, folder_id):
    """
    Getter for a Folder or notebook that you are authorised to view.

    :param folder_id: numeric folder ID or global ID
    :return: metadata about the folder or notebook
    """
    record_id = self._get_numeric_record_id(folder_id)
    return self.retrieve_api_results(f"/folders/{record_id}")
def list_folder_tree(self, folder_id=None, typesToInclude=None):
    """
    Lists contents of a folder by its ID.

    :param folder_id: optional folder id; if None, the Home Folder listing is returned
    :param typesToInclude: an optional list of any of 'folder', 'notebook' or
        'document'; results will be restricted to these types
    :return: a paginated folder listing
    :raises ValueError: if typesToInclude contains none of the recognised types
    """
    # FIX: the default was a shared mutable list ([]); use None as the
    # "no filter" sentinel instead (backward compatible for all callers).
    if typesToInclude is None:
        typesToInclude = []
    if folder_id is not None:
        url = "/folders/tree/{}".format(folder_id)
    else:
        url = "/folders/tree"
    params = {}
    if len(typesToInclude) > 0:
        # Same validation as before: at least one recognised type must appear.
        if not {"document", "notebook", "folder"}.intersection(typesToInclude):
            raise ValueError(
                'typesToInclude must contain "document", "notebook" and/or "folder"'
            )
        params["typesToInclude"] = ",".join(typesToInclude)
    return self.retrieve_api_results(url, params)
# Groups methods
def get_groups(self):
    """
    Gets a list of groups that you belong to. May be empty if you are not
    in any groups.

    :return: parsed response (a possibly empty list of group records)
    """
    return self.retrieve_api_results("/groups")
# Import methods
def import_word(self, file, folder_id=None, image_folder_id=None):
    """
    Imports a Word file into RSpace and creates an RSpace document from it.

    :param file: The Word file to import
    :param folder_id: Optionally, the ID of a folder in which to create the
    new document
    :param image_folder_id: Optionally, the ID of a folder in the image gallery
    into which images extracted from Word documents will be placed. By default, these
    will be placed in the top-level of the Gallery.
    :return: parsed response for the created document
    """
    data = {}
    if folder_id is not None:
        numeric_folder_id = self._get_numeric_record_id(folder_id)
        data["folderId"] = numeric_folder_id
    if image_folder_id is not None:
        numeric_imagefolder_id = self._get_numeric_record_id(image_folder_id)
        data["imageFolderId"] = numeric_imagefolder_id
    # Multipart upload: the file goes in 'files', the ids as form fields.
    response = requests.post(
        self._get_api_url() + "/import/word",
        files={"file": file},
        data=data,
        headers=self._get_headers(),
    )
    return self._handle_response(response)
# Miscellaneous methods
def get_status(self):
    """
    Simple API call to check that API service is available. Throws an AuthenticationError if authentication fails.
    More information on https://community.researchspace.com/public/apiDocs (or your own instance's /public/apiDocs).

    :return: parsed response as a dictionary (most important field is 'message' which is supposed to be 'OK')
    """
    return self.retrieve_api_results("/status")
##### Non - documented, non public API methods:
# Sysadmin methods
def get_users(
    self,
    page_number=0,
    page_size=20,
    tempaccount_only=True,
    created_before="2018-04-30",
    last_login_before=None,
):
    """
    Gets a paginated list of (by default temporary) user accounts.
    Non-public sysadmin API.

    :param page_number: 0-based page index
    :param page_size: maximum number of items to retrieve
    :param tempaccount_only: restrict results to temporary accounts
    :param created_before: only accounts created before this date (YYYY-MM-DD)
    :param last_login_before: optionally, only accounts whose last login was before this date
    :return: parsed response as a dictionary
    """
    criteria = dict(
        pageSize=page_size,
        pageNumber=page_number,
        createdBefore=created_before,
        tempAccountsOnly=tempaccount_only,
    )
    if last_login_before is not None:
        criteria["lastLoginBefore"] = last_login_before
    return self.retrieve_api_results("/sysadmin/users", criteria)
def deleteTempUser(self, user_id):
    """
    Deletes a (temporary) user account by id. Non-public sysadmin API.

    :param user_id: id of the user to delete
    """
    return self.doDelete("sysadmin/users", user_id)
def import_tree(
    self,
    data_dir: str,
    parent_folder_id: int = None,
    ignore_hidden_folders: bool = True,
    halt_on_error: bool = False,
    doc_creation=DocumentCreationStrategy.DOC_PER_FILE,
) -> dict:
    """
    Imports a directory tree into RSpace, recreating the tree in RSpace,
    uploading files and creating documents with links to the files.

    Parameters
    ----------
    data_dir : str
        Path to top-level of directory tree.
    parent_folder_id: int, optional
        The id of the RSpace folder into which the top-level directory is created.
        If not specified will be created in Workspace top-level folder.
    ignore_hidden_folders : bool, optional
        Whether hidden folders (names starting with '.') should be ignored. The default is True.
    halt_on_error : bool, optional
        Whether to halt the process in case of IO error reading files. The default is False.
    doc_creation : DocumentCreationStrategy, optional
        How documents are created for the imported files. The default is DOC_PER_FILE.

    Returns
    -------
    dict
        An indication of success/failure, and mappings of files and folders to
        RSpace Ids.
    """
    # Delegate the recursive walk/upload to the importer helper module.
    tree_import = importer.TreeImporter(self)
    return tree_import.import_tree(
        data_dir,
        parent_folder_id,
        ignore_hidden_folders,
        halt_on_error,
        doc_creation,
    )
from bs4 import BeautifulSoup
import re
class FieldContent:
    """
    Encapsulates HTML content of text fields and provides methods to pull
    out features of interest such as tables, links etc.
    """

    def __init__(self, html_content):
        # Keep both the raw HTML and a parsed soup for repeated queries.
        self.html = html_content
        self.soup = BeautifulSoup(self.html, "html.parser")

    def get_text(self):
        """
        Gets the text of the field, stripped of all HTML tags.

        Returns
        -------
        str
            HTML-stripped content.
        """
        return self.soup.get_text()

    def get_datatables(
        self, search_term=None, ignore_empty_rows=True, ignore_empty_columns=True
    ):
        """
        Parses HTML content to identify and return CalculationTables as 2d arrays.

        Parameters
        ----------
        search_term : str, optional
            Filter to return only data-tables containing this string
            (case-insensitive regex search). The default is None (no filter).
        ignore_empty_rows : bool, optional
            Drop rows whose cells are all empty. The default is True.
        ignore_empty_columns : bool, optional
            Drop columns whose cells are all empty. The default is True.

        Returns
        -------
        all_tables : list of 2d arrays
            A possibly empty list of 2d arrays in format [row][column]
            containing the data-set values. Formulas are not included.
        """
        divs = self.soup.find_all("div", class_="rsCalcTableDiv")
        all_tables = []
        for div in divs:
            if search_term is not None:
                if re.search(search_term, div.get_text(), re.IGNORECASE) is None:
                    continue
            trs = div.find_all("tr")
            all_r_data = []
            for tr in trs:
                r_data = [el.get_text().strip() for el in tr.find_all("td")]
                if ignore_empty_rows is False or any(len(x) > 0 for x in r_data):
                    all_r_data.append(r_data)
            if ignore_empty_columns and len(all_r_data) > 0:
                # Find indices of columns that are empty in every kept row.
                # NOTE(review): assumes every row has at least as many cells
                # as row 0 -- ragged tables would raise IndexError; confirm
                # against the HTML the CalculationTable widget emits.
                cols_to_remove = [
                    i
                    for i in range(len(all_r_data[0]))
                    if all(len(r[i]) == 0 for r in all_r_data)
                ]
                # Remove each empty column, shifting indices for prior removals.
                for j, col in enumerate(cols_to_remove):
                    for r in all_r_data:
                        r.pop(col - j)
            all_tables.append(all_r_data)
        # A list of 2d arrays; each row has the same number of columns.
        return all_tables
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(object):
def exists(self, table, tget):
"""
Test for the existence of columns in the table, as specified in the TGet.
@return true if the specified TGet matches one or more keys, false if not
Parameters:
- table: the table to check on
- tget: the TGet to check for
"""
pass
def existsAll(self, table, tgets):
"""
Test for the existence of columns in the table, as specified by the TGets.
This will return an array of booleans. Each value will be true if the related Get matches
one or more keys, false if not.
Parameters:
- table: the table to check on
- tgets: a list of TGets to check for
"""
pass
def get(self, table, tget):
"""
Method for getting data from a row.
If the row cannot be found an empty Result is returned.
This can be checked by the empty field of the TResult
@return the result
Parameters:
- table: the table to get from
- tget: the TGet to fetch
"""
pass
def getMultiple(self, table, tgets):
"""
Method for getting multiple rows.
If a row cannot be found there will be a null
value in the result list for that TGet at the
same position.
So the Results are in the same order as the TGets.
Parameters:
- table: the table to get from
- tgets: a list of TGets to fetch, the Result list
will have the Results at corresponding positions
or null if there was an error
"""
pass
def put(self, table, tput):
"""
Commit a TPut to a table.
Parameters:
- table: the table to put data in
- tput: the TPut to put
"""
pass
def checkAndPut(self, table, row, family, qualifier, value, tput):
"""
Atomically checks if a row/family/qualifier value matches the expected
value. If it does, it adds the TPut.
@return true if the new put was executed, false otherwise
Parameters:
- table: to check in and put to
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value, if not provided the
check is for the non-existence of the
column in question
- tput: the TPut to put if the check succeeds
"""
pass
def putMultiple(self, table, tputs):
"""
Commit a List of Puts to the table.
Parameters:
- table: the table to put data in
- tputs: a list of TPuts to commit
"""
pass
def deleteSingle(self, table, tdelete):
"""
Deletes as specified by the TDelete.
Note: "delete" is a reserved keyword and cannot be used in Thrift
thus the inconsistent naming scheme from the other functions.
Parameters:
- table: the table to delete from
- tdelete: the TDelete to delete
"""
pass
def deleteMultiple(self, table, tdeletes):
"""
Bulk commit a List of TDeletes to the table.
Throws a TIOError if any of the deletes fail.
Always returns an empty list for backwards compatibility.
Parameters:
- table: the table to delete from
- tdeletes: list of TDeletes to delete
"""
pass
def checkAndDelete(self, table, row, family, qualifier, value, tdelete):
"""
Atomically checks if a row/family/qualifier value matches the expected
value. If it does, it adds the delete.
@return true if the new delete was executed, false otherwise
Parameters:
- table: to check in and delete from
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value, if not provided the
check is for the non-existence of the
column in question
- tdelete: the TDelete to execute if the check succeeds
"""
pass
def increment(self, table, tincrement):
"""
Parameters:
- table: the table to increment the value on
- tincrement: the TIncrement to increment
"""
pass
def append(self, table, tappend):
"""
Parameters:
- table: the table to append the value on
- tappend: the TAppend to append
"""
pass
def openScanner(self, table, tscan):
"""
Get a Scanner for the provided TScan object.
@return Scanner Id to be used with other scanner procedures
Parameters:
- table: the table to get the Scanner for
- tscan: the scan object to get a Scanner for
"""
pass
def getScannerRows(self, scannerId, numRows):
"""
Grabs multiple rows from a Scanner.
@return Between zero and numRows TResults
Parameters:
- scannerId: the Id of the Scanner to return rows from. This is an Id returned from the openScanner function.
- numRows: number of rows to return
"""
pass
def closeScanner(self, scannerId):
"""
Closes the scanner. Should be called to free server side resources timely.
Typically close once the scanner is not needed anymore, i.e. after looping
over it to get all the required rows.
Parameters:
- scannerId: the Id of the Scanner to close *
"""
pass
def mutateRow(self, table, trowMutations):
"""
mutateRow performs multiple mutations atomically on a single row.
Parameters:
- table: table to apply the mutations
- trowMutations: mutations to apply
"""
pass
def getScannerResults(self, table, tscan, numRows):
"""
Get results for the provided TScan object.
This helper function opens a scanner, get the results and close the scanner.
@return between zero and numRows TResults
Parameters:
- table: the table to get the Scanner for
- tscan: the scan object to get a Scanner for
- numRows: number of rows to return
"""
pass
def getRegionLocation(self, table, row, reload):
"""
Given a table and a row get the location of the region that
would contain the given row key.
reload = true means the cache will be cleared and the location
will be fetched from meta.
Parameters:
- table
- row
- reload
"""
pass
def getAllRegionLocations(self, table):
"""
Get all of the region locations for a given table.
Parameters:
- table
"""
pass
def checkAndMutate(self, table, row, family, qualifier, compareOp, value, rowMutations):
"""
Atomically checks if a row/family/qualifier value matches the expected
value. If it does, it mutates the row.
@return true if the row was mutated, false otherwise
Parameters:
- table: to check in and delete from
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- compareOp: comparison to make on the value
- value: the expected value to be compared against, if not provided the
check is for the non-existence of the column in question
- rowMutations: row mutations to execute if the value matches
"""
pass
def getTableDescriptor(self, table):
"""
Get a table descriptor.
@return the TableDescriptor of the giving tablename
Parameters:
- table: the tablename of the table to get tableDescriptor
"""
pass
def getTableDescriptors(self, tables):
"""
Get table descriptors of tables.
@return the TableDescriptor of the giving tablename
Parameters:
- tables: the tablename list of the tables to get tableDescriptor
"""
pass
def tableExists(self, tableName):
"""
@return true if table exists already, false if not
Parameters:
- tableName: the tablename of the tables to check
"""
pass
def getTableDescriptorsByPattern(self, regex, includeSysTables):
"""
Get table descriptors of tables that match the given pattern
@return the tableDescriptors of the matching table
Parameters:
- regex: The regular expression to match against
- includeSysTables: set to false if match only against userspace tables
"""
pass
def getTableDescriptorsByNamespace(self, name):
"""
Get table descriptors of tables in the given namespace
@return the tableDescriptors in the namespce
Parameters:
- name: The namesapce's name
"""
pass
def getTableNamesByPattern(self, regex, includeSysTables):
"""
Get table names of tables that match the given pattern
@return the table names of the matching table
Parameters:
- regex: The regular expression to match against
- includeSysTables: set to false if match only against userspace tables
"""
pass
def getTableNamesByNamespace(self, name):
"""
Get table names of tables in the given namespace
@return the table names of the matching table
Parameters:
- name: The namesapce's name
"""
pass
def createTable(self, desc, splitKeys):
"""
Creates a new table with an initial set of empty regions defined by the specified split keys.
The total number of regions created will be the number of split keys plus one. Synchronous
operation.
Parameters:
- desc: table descriptor for table
- splitKeys: rray of split keys for the initial regions of the table
"""
pass
def deleteTable(self, tableName):
"""
Deletes a table. Synchronous operation.
Parameters:
- tableName: the tablename to delete
"""
pass
def truncateTable(self, tableName, preserveSplits):
"""
Truncate a table. Synchronous operation.
Parameters:
- tableName: the tablename to truncate
- preserveSplits: whether to preserve previous splits
"""
pass
def enableTable(self, tableName):
"""
Enalbe a table
Parameters:
- tableName: the tablename to enable
"""
pass
def disableTable(self, tableName):
"""
Disable a table
Parameters:
- tableName: the tablename to disable
"""
pass
def isTableEnabled(self, tableName):
"""
@return true if table is enabled, false if not
Parameters:
- tableName: the tablename to check
"""
pass
def isTableDisabled(self, tableName):
"""
@return true if table is disabled, false if not
Parameters:
- tableName: the tablename to check
"""
pass
def isTableAvailable(self, tableName):
"""
@return true if table is available, false if not
Parameters:
- tableName: the tablename to check
"""
pass
def isTableAvailableWithSplit(self, tableName, splitKeys):
"""
* Use this api to check if the table has been created with the specified number of splitkeys
* which was used while creating the given table. Note : If this api is used after a table's
* region gets splitted, the api may return false.
*
* @return true if table is available, false if not
*
Parameters:
- tableName: the tablename to check
- splitKeys: keys to check if the table has been created with all split keys
"""
pass
def addColumnFamily(self, tableName, column):
"""
Add a column family to an existing table. Synchronous operation.
Parameters:
- tableName: the tablename to add column family to
- column: column family descriptor of column family to be added
"""
pass
def deleteColumnFamily(self, tableName, column):
"""
Delete a column family from a table. Synchronous operation.
Parameters:
- tableName: the tablename to delete column family from
- column: name of column family to be deleted
"""
pass
def modifyColumnFamily(self, tableName, column):
"""
Modify an existing column family on a table. Synchronous operation.
Parameters:
- tableName: the tablename to modify column family
- column: column family descriptor of column family to be modified
"""
pass
def modifyTable(self, desc):
"""
Modify an existing table
Parameters:
- desc: the descriptor of the table to modify
"""
pass
def createNamespace(self, namespaceDesc):
"""
Create a new namespace. Blocks until namespace has been successfully created or an exception is
thrown
Parameters:
- namespaceDesc: descriptor which describes the new namespace
"""
pass
def modifyNamespace(self, namespaceDesc):
"""
Modify an existing namespace. Blocks until namespace has been successfully modified or an
exception is thrown
Parameters:
- namespaceDesc: descriptor which describes the new namespace
"""
pass
def deleteNamespace(self, name):
"""
Delete an existing namespace. Only empty namespaces (no tables) can be removed.
Blocks until namespace has been successfully deleted or an
exception is thrown.
Parameters:
- name: namespace name
"""
pass
def getNamespaceDescriptor(self, name):
"""
Get a namespace descriptor by name.
@retrun the descriptor
Parameters:
- name: name of namespace descriptor
"""
pass
def listNamespaceDescriptors(self):
"""
@return all namespaces
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
    """Wrap Thrift protocol objects; oprot defaults to iprot when not given."""
    # A single protocol is common for framed transports: reuse it for both
    # directions unless a distinct output protocol is supplied.
    self._iprot = self._oprot = iprot
    if oprot is not None:
        self._oprot = oprot
    # Sequence id used when framing call messages.
    self._seqid = 0
def exists(self, table, tget):
    """
    Test for the existence of columns in the table, as specified in the TGet.

    @return true if the specified TGet matches one or more keys, false if not

    Parameters:
     - table: the table to check on
     - tget: the TGet to check for
    """
    # Synchronous RPC: serialize the request, then block on the reply.
    self.send_exists(table, tget)
    return self.recv_exists()
def send_exists(self, table, tget):
    # Serialize an 'exists' CALL frame onto the output protocol and flush it.
    self._oprot.writeMessageBegin('exists', TMessageType.CALL, self._seqid)
    args = exists_args()
    args.table = table
    args.tget = tget
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_exists(self):
    # Read the reply frame for 'exists' and unpack success / error fields.
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side failure outside the declared IDL exceptions.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = exists_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        # Declared TIOError raised by the service.
        raise result.io
    # Neither a result nor a declared exception was present in the frame.
    raise TApplicationException(TApplicationException.MISSING_RESULT, "exists failed: unknown result")
def existsAll(self, table, tgets):
"""
Test for the existence of columns in the table, as specified by the TGets.
This will return an array of booleans. Each value will be true if the related Get matches
one or more keys, false if not.
Parameters:
- table: the table to check on
- tgets: a list of TGets to check for
"""
self.send_existsAll(table, tgets)
return self.recv_existsAll()
def send_existsAll(self, table, tgets):
self._oprot.writeMessageBegin('existsAll', TMessageType.CALL, self._seqid)
args = existsAll_args()
args.table = table
args.tgets = tgets
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_existsAll(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = existsAll_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "existsAll failed: unknown result")
def get(self, table, tget):
"""
Method for getting data from a row.
If the row cannot be found an empty Result is returned.
This can be checked by the empty field of the TResult
@return the result
Parameters:
- table: the table to get from
- tget: the TGet to fetch
"""
self.send_get(table, tget)
return self.recv_get()
def send_get(self, table, tget):
self._oprot.writeMessageBegin('get', TMessageType.CALL, self._seqid)
args = get_args()
args.table = table
args.tget = tget
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = get_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "get failed: unknown result")
def getMultiple(self, table, tgets):
"""
Method for getting multiple rows.
If a row cannot be found there will be a null
value in the result list for that TGet at the
same position.
So the Results are in the same order as the TGets.
Parameters:
- table: the table to get from
- tgets: a list of TGets to fetch, the Result list
will have the Results at corresponding positions
or null if there was an error
"""
self.send_getMultiple(table, tgets)
return self.recv_getMultiple()
def send_getMultiple(self, table, tgets):
self._oprot.writeMessageBegin('getMultiple', TMessageType.CALL, self._seqid)
args = getMultiple_args()
args.table = table
args.tgets = tgets
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getMultiple(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getMultiple_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getMultiple failed: unknown result")
def put(self, table, tput):
    """
    Commit a TPut to a table.

    Parameters:
     - table: the table to put data in
     - tput: the TPut to put
    """
    self.send_put(table, tput)
    self.recv_put()

def send_put(self, table, tput):
    """Serialize the 'put' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('put', TMessageType.CALL, self._seqid)
    args = put_args()
    args.table = table
    args.tput = tput
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_put(self):
    """Read the 'put' reply; raise any transported exception or I/O error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = put_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.io is not None:
        raise result.io
    return

def checkAndPut(self, table, row, family, qualifier, value, tput):
    """
    Atomically checks if a row/family/qualifier value matches the expected
    value. If it does, it adds the TPut.

    @return true if the new put was executed, false otherwise

    Parameters:
     - table: to check in and put to
     - row: row to check
     - family: column family to check
     - qualifier: column qualifier to check
     - value: the expected value, if not provided the
    check is for the non-existence of the
    column in question
     - tput: the TPut to put if the check succeeds
    """
    self.send_checkAndPut(table, row, family, qualifier, value, tput)
    return self.recv_checkAndPut()

def send_checkAndPut(self, table, row, family, qualifier, value, tput):
    """Serialize the 'checkAndPut' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('checkAndPut', TMessageType.CALL, self._seqid)
    args = checkAndPut_args()
    args.table = table
    args.row = row
    args.family = family
    args.qualifier = qualifier
    args.value = value
    args.tput = tput
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_checkAndPut(self):
    """Read the 'checkAndPut' reply and return its result; raise on error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = checkAndPut_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "checkAndPut failed: unknown result")

def putMultiple(self, table, tputs):
    """
    Commit a List of Puts to the table.

    Parameters:
     - table: the table to put data in
     - tputs: a list of TPuts to commit
    """
    self.send_putMultiple(table, tputs)
    self.recv_putMultiple()

def send_putMultiple(self, table, tputs):
    """Serialize the 'putMultiple' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('putMultiple', TMessageType.CALL, self._seqid)
    args = putMultiple_args()
    args.table = table
    args.tputs = tputs
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_putMultiple(self):
    """Read the 'putMultiple' reply; raise any transported exception or I/O error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = putMultiple_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.io is not None:
        raise result.io
    return
def deleteSingle(self, table, tdelete):
    """
    Deletes as specified by the TDelete.

    Note: "delete" is a reserved keyword and cannot be used in Thrift
    thus the inconsistent naming scheme from the other functions.

    Parameters:
     - table: the table to delete from
     - tdelete: the TDelete to delete
    """
    self.send_deleteSingle(table, tdelete)
    self.recv_deleteSingle()

def send_deleteSingle(self, table, tdelete):
    """Serialize the 'deleteSingle' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('deleteSingle', TMessageType.CALL, self._seqid)
    args = deleteSingle_args()
    args.table = table
    args.tdelete = tdelete
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_deleteSingle(self):
    """Read the 'deleteSingle' reply; raise any transported exception or I/O error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = deleteSingle_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.io is not None:
        raise result.io
    return

def deleteMultiple(self, table, tdeletes):
    """
    Bulk commit a List of TDeletes to the table.

    Throws a TIOError if any of the deletes fail.

    Always returns an empty list for backwards compatibility.

    Parameters:
     - table: the table to delete from
     - tdeletes: list of TDeletes to delete
    """
    self.send_deleteMultiple(table, tdeletes)
    return self.recv_deleteMultiple()

def send_deleteMultiple(self, table, tdeletes):
    """Serialize the 'deleteMultiple' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('deleteMultiple', TMessageType.CALL, self._seqid)
    args = deleteMultiple_args()
    args.table = table
    args.tdeletes = tdeletes
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_deleteMultiple(self):
    """Read the 'deleteMultiple' reply and return its result; raise on error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = deleteMultiple_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "deleteMultiple failed: unknown result")

def checkAndDelete(self, table, row, family, qualifier, value, tdelete):
    """
    Atomically checks if a row/family/qualifier value matches the expected
    value. If it does, it adds the delete.

    @return true if the new delete was executed, false otherwise

    Parameters:
     - table: to check in and delete from
     - row: row to check
     - family: column family to check
     - qualifier: column qualifier to check
     - value: the expected value, if not provided the
    check is for the non-existence of the
    column in question
     - tdelete: the TDelete to execute if the check succeeds
    """
    self.send_checkAndDelete(table, row, family, qualifier, value, tdelete)
    return self.recv_checkAndDelete()

def send_checkAndDelete(self, table, row, family, qualifier, value, tdelete):
    """Serialize the 'checkAndDelete' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('checkAndDelete', TMessageType.CALL, self._seqid)
    args = checkAndDelete_args()
    args.table = table
    args.row = row
    args.family = family
    args.qualifier = qualifier
    args.value = value
    args.tdelete = tdelete
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_checkAndDelete(self):
    """Read the 'checkAndDelete' reply and return its result; raise on error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = checkAndDelete_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "checkAndDelete failed: unknown result")
def increment(self, table, tincrement):
    """
    Apply the given TIncrement to the table.

    Parameters:
     - table: the table to increment the value on
     - tincrement: the TIncrement to increment
    """
    self.send_increment(table, tincrement)
    return self.recv_increment()

def send_increment(self, table, tincrement):
    """Serialize the 'increment' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('increment', TMessageType.CALL, self._seqid)
    args = increment_args()
    args.table = table
    args.tincrement = tincrement
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_increment(self):
    """Read the 'increment' reply and return its result; raise on error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = increment_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "increment failed: unknown result")

def append(self, table, tappend):
    """
    Apply the given TAppend to the table.

    Parameters:
     - table: the table to append the value on
     - tappend: the TAppend to append
    """
    self.send_append(table, tappend)
    return self.recv_append()

def send_append(self, table, tappend):
    """Serialize the 'append' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('append', TMessageType.CALL, self._seqid)
    args = append_args()
    args.table = table
    args.tappend = tappend
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_append(self):
    """Read the 'append' reply and return its result; raise on error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = append_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "append failed: unknown result")

def openScanner(self, table, tscan):
    """
    Get a Scanner for the provided TScan object.

    @return Scanner Id to be used with other scanner procedures

    Parameters:
     - table: the table to get the Scanner for
     - tscan: the scan object to get a Scanner for
    """
    self.send_openScanner(table, tscan)
    return self.recv_openScanner()

def send_openScanner(self, table, tscan):
    """Serialize the 'openScanner' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('openScanner', TMessageType.CALL, self._seqid)
    args = openScanner_args()
    args.table = table
    args.tscan = tscan
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_openScanner(self):
    """Read the 'openScanner' reply and return the scanner id; raise on error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = openScanner_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "openScanner failed: unknown result")
def getScannerRows(self, scannerId, numRows):
    """
    Grabs multiple rows from a Scanner.

    @return Between zero and numRows TResults

    Parameters:
     - scannerId: the Id of the Scanner to return rows from. This is an Id returned from the openScanner function.
     - numRows: number of rows to return
    """
    self.send_getScannerRows(scannerId, numRows)
    return self.recv_getScannerRows()

def send_getScannerRows(self, scannerId, numRows):
    """Serialize the 'getScannerRows' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('getScannerRows', TMessageType.CALL, self._seqid)
    args = getScannerRows_args()
    args.scannerId = scannerId
    args.numRows = numRows
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_getScannerRows(self):
    """Read the 'getScannerRows' reply and return its result; raise on error.

    May raise the declared I/O error (`io`) or illegal-argument error (`ia`).
    """
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = getScannerRows_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    if result.ia is not None:
        raise result.ia
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getScannerRows failed: unknown result")

def closeScanner(self, scannerId):
    """
    Closes the scanner. Should be called to free server side resources timely.
    Typically close once the scanner is not needed anymore, i.e. after looping
    over it to get all the required rows.

    Parameters:
     - scannerId: the Id of the Scanner to close *
    """
    self.send_closeScanner(scannerId)
    self.recv_closeScanner()

def send_closeScanner(self, scannerId):
    """Serialize the 'closeScanner' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('closeScanner', TMessageType.CALL, self._seqid)
    args = closeScanner_args()
    args.scannerId = scannerId
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_closeScanner(self):
    """Read the 'closeScanner' reply; raise any transported or declared error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = closeScanner_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.io is not None:
        raise result.io
    if result.ia is not None:
        raise result.ia
    return

def mutateRow(self, table, trowMutations):
    """
    mutateRow performs multiple mutations atomically on a single row.

    Parameters:
     - table: table to apply the mutations
     - trowMutations: mutations to apply
    """
    self.send_mutateRow(table, trowMutations)
    self.recv_mutateRow()

def send_mutateRow(self, table, trowMutations):
    """Serialize the 'mutateRow' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('mutateRow', TMessageType.CALL, self._seqid)
    args = mutateRow_args()
    args.table = table
    args.trowMutations = trowMutations
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_mutateRow(self):
    """Read the 'mutateRow' reply; raise any transported exception or I/O error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = mutateRow_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.io is not None:
        raise result.io
    return
def getScannerResults(self, table, tscan, numRows):
    """
    Get results for the provided TScan object.
    This helper function opens a scanner, get the results and close the scanner.

    @return between zero and numRows TResults

    Parameters:
     - table: the table to get the Scanner for
     - tscan: the scan object to get a Scanner for
     - numRows: number of rows to return
    """
    self.send_getScannerResults(table, tscan, numRows)
    return self.recv_getScannerResults()

def send_getScannerResults(self, table, tscan, numRows):
    """Serialize the 'getScannerResults' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('getScannerResults', TMessageType.CALL, self._seqid)
    args = getScannerResults_args()
    args.table = table
    args.tscan = tscan
    args.numRows = numRows
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_getScannerResults(self):
    """Read the 'getScannerResults' reply and return its result; raise on error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = getScannerResults_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getScannerResults failed: unknown result")

def getRegionLocation(self, table, row, reload):
    """
    Given a table and a row get the location of the region that
    would contain the given row key.

    reload = true means the cache will be cleared and the location
    will be fetched from meta.

    Parameters:
     - table
     - row
     - reload
    """
    self.send_getRegionLocation(table, row, reload)
    return self.recv_getRegionLocation()

def send_getRegionLocation(self, table, row, reload):
    """Serialize the 'getRegionLocation' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('getRegionLocation', TMessageType.CALL, self._seqid)
    args = getRegionLocation_args()
    args.table = table
    args.row = row
    args.reload = reload
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_getRegionLocation(self):
    """Read the 'getRegionLocation' reply and return its result; raise on error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = getRegionLocation_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getRegionLocation failed: unknown result")

def getAllRegionLocations(self, table):
    """
    Get all of the region locations for a given table.

    Parameters:
     - table
    """
    self.send_getAllRegionLocations(table)
    return self.recv_getAllRegionLocations()

def send_getAllRegionLocations(self, table):
    """Serialize the 'getAllRegionLocations' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('getAllRegionLocations', TMessageType.CALL, self._seqid)
    args = getAllRegionLocations_args()
    args.table = table
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_getAllRegionLocations(self):
    """Read the 'getAllRegionLocations' reply and return its result; raise on error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = getAllRegionLocations_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getAllRegionLocations failed: unknown result")
def checkAndMutate(self, table, row, family, qualifier, compareOp, value, rowMutations):
    """
    Atomically checks if a row/family/qualifier value matches the expected
    value. If it does, it mutates the row.

    @return true if the row was mutated, false otherwise

    Parameters:
     - table: to check in and delete from
     - row: row to check
     - family: column family to check
     - qualifier: column qualifier to check
     - compareOp: comparison to make on the value
     - value: the expected value to be compared against, if not provided the
    check is for the non-existence of the column in question
     - rowMutations: row mutations to execute if the value matches
    """
    self.send_checkAndMutate(table, row, family, qualifier, compareOp, value, rowMutations)
    return self.recv_checkAndMutate()

def send_checkAndMutate(self, table, row, family, qualifier, compareOp, value, rowMutations):
    """Serialize the 'checkAndMutate' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('checkAndMutate', TMessageType.CALL, self._seqid)
    args = checkAndMutate_args()
    args.table = table
    args.row = row
    args.family = family
    args.qualifier = qualifier
    args.compareOp = compareOp
    args.value = value
    args.rowMutations = rowMutations
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_checkAndMutate(self):
    """Read the 'checkAndMutate' reply and return its result; raise on error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = checkAndMutate_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "checkAndMutate failed: unknown result")

def getTableDescriptor(self, table):
    """
    Get a table descriptor.

    @return the TableDescriptor of the given tablename

    Parameters:
     - table: the tablename of the table to get tableDescriptor
    """
    self.send_getTableDescriptor(table)
    return self.recv_getTableDescriptor()

def send_getTableDescriptor(self, table):
    """Serialize the 'getTableDescriptor' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('getTableDescriptor', TMessageType.CALL, self._seqid)
    args = getTableDescriptor_args()
    args.table = table
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_getTableDescriptor(self):
    """Read the 'getTableDescriptor' reply and return its result; raise on error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = getTableDescriptor_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getTableDescriptor failed: unknown result")

def getTableDescriptors(self, tables):
    """
    Get table descriptors of tables.

    @return the TableDescriptors of the given tablenames

    Parameters:
     - tables: the tablename list of the tables to get tableDescriptor
    """
    self.send_getTableDescriptors(tables)
    return self.recv_getTableDescriptors()

def send_getTableDescriptors(self, tables):
    """Serialize the 'getTableDescriptors' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('getTableDescriptors', TMessageType.CALL, self._seqid)
    args = getTableDescriptors_args()
    args.tables = tables
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_getTableDescriptors(self):
    """Read the 'getTableDescriptors' reply and return its result; raise on error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = getTableDescriptors_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getTableDescriptors failed: unknown result")
def tableExists(self, tableName):
    """
    @return true if table exists already, false if not

    Parameters:
     - tableName: the tablename of the tables to check
    """
    self.send_tableExists(tableName)
    return self.recv_tableExists()

def send_tableExists(self, tableName):
    """Serialize the 'tableExists' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('tableExists', TMessageType.CALL, self._seqid)
    args = tableExists_args()
    args.tableName = tableName
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_tableExists(self):
    """Read the 'tableExists' reply and return its result; raise on error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = tableExists_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "tableExists failed: unknown result")

def getTableDescriptorsByPattern(self, regex, includeSysTables):
    """
    Get table descriptors of tables that match the given pattern

    @return the tableDescriptors of the matching table

    Parameters:
     - regex: The regular expression to match against
     - includeSysTables: set to false if match only against userspace tables
    """
    self.send_getTableDescriptorsByPattern(regex, includeSysTables)
    return self.recv_getTableDescriptorsByPattern()

def send_getTableDescriptorsByPattern(self, regex, includeSysTables):
    """Serialize the 'getTableDescriptorsByPattern' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('getTableDescriptorsByPattern', TMessageType.CALL, self._seqid)
    args = getTableDescriptorsByPattern_args()
    args.regex = regex
    args.includeSysTables = includeSysTables
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_getTableDescriptorsByPattern(self):
    """Read the 'getTableDescriptorsByPattern' reply and return its result; raise on error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = getTableDescriptorsByPattern_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getTableDescriptorsByPattern failed: unknown result")

def getTableDescriptorsByNamespace(self, name):
    """
    Get table descriptors of tables in the given namespace

    @return the tableDescriptors in the namespace

    Parameters:
     - name: The namespace's name
    """
    self.send_getTableDescriptorsByNamespace(name)
    return self.recv_getTableDescriptorsByNamespace()

def send_getTableDescriptorsByNamespace(self, name):
    """Serialize the 'getTableDescriptorsByNamespace' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('getTableDescriptorsByNamespace', TMessageType.CALL, self._seqid)
    args = getTableDescriptorsByNamespace_args()
    args.name = name
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_getTableDescriptorsByNamespace(self):
    """Read the 'getTableDescriptorsByNamespace' reply and return its result; raise on error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = getTableDescriptorsByNamespace_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getTableDescriptorsByNamespace failed: unknown result")

def getTableNamesByPattern(self, regex, includeSysTables):
    """
    Get table names of tables that match the given pattern

    @return the table names of the matching table

    Parameters:
     - regex: The regular expression to match against
     - includeSysTables: set to false if match only against userspace tables
    """
    self.send_getTableNamesByPattern(regex, includeSysTables)
    return self.recv_getTableNamesByPattern()

def send_getTableNamesByPattern(self, regex, includeSysTables):
    """Serialize the 'getTableNamesByPattern' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('getTableNamesByPattern', TMessageType.CALL, self._seqid)
    args = getTableNamesByPattern_args()
    args.regex = regex
    args.includeSysTables = includeSysTables
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_getTableNamesByPattern(self):
    """Read the 'getTableNamesByPattern' reply and return its result; raise on error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = getTableNamesByPattern_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getTableNamesByPattern failed: unknown result")

def getTableNamesByNamespace(self, name):
    """
    Get table names of tables in the given namespace

    @return the table names of the matching table

    Parameters:
     - name: The namespace's name
    """
    self.send_getTableNamesByNamespace(name)
    return self.recv_getTableNamesByNamespace()

def send_getTableNamesByNamespace(self, name):
    """Serialize the 'getTableNamesByNamespace' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('getTableNamesByNamespace', TMessageType.CALL, self._seqid)
    args = getTableNamesByNamespace_args()
    args.name = name
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_getTableNamesByNamespace(self):
    """Read the 'getTableNamesByNamespace' reply and return its result; raise on error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = getTableNamesByNamespace_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getTableNamesByNamespace failed: unknown result")
def createTable(self, desc, splitKeys):
    """
    Creates a new table with an initial set of empty regions defined by the specified split keys.
    The total number of regions created will be the number of split keys plus one. Synchronous
    operation.

    Parameters:
     - desc: table descriptor for table
     - splitKeys: Array of split keys for the initial regions of the table
    """
    self.send_createTable(desc, splitKeys)
    self.recv_createTable()

def send_createTable(self, desc, splitKeys):
    """Serialize the 'createTable' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('createTable', TMessageType.CALL, self._seqid)
    args = createTable_args()
    args.desc = desc
    args.splitKeys = splitKeys
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_createTable(self):
    """Read the 'createTable' reply; raise any transported exception or I/O error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = createTable_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.io is not None:
        raise result.io
    return

def deleteTable(self, tableName):
    """
    Deletes a table. Synchronous operation.

    Parameters:
     - tableName: the tablename to delete
    """
    self.send_deleteTable(tableName)
    self.recv_deleteTable()

def send_deleteTable(self, tableName):
    """Serialize the 'deleteTable' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('deleteTable', TMessageType.CALL, self._seqid)
    args = deleteTable_args()
    args.tableName = tableName
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_deleteTable(self):
    """Read the 'deleteTable' reply; raise any transported exception or I/O error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = deleteTable_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.io is not None:
        raise result.io
    return

def truncateTable(self, tableName, preserveSplits):
    """
    Truncate a table. Synchronous operation.

    Parameters:
     - tableName: the tablename to truncate
     - preserveSplits: whether to preserve previous splits
    """
    self.send_truncateTable(tableName, preserveSplits)
    self.recv_truncateTable()

def send_truncateTable(self, tableName, preserveSplits):
    """Serialize the 'truncateTable' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('truncateTable', TMessageType.CALL, self._seqid)
    args = truncateTable_args()
    args.tableName = tableName
    args.preserveSplits = preserveSplits
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_truncateTable(self):
    """Read the 'truncateTable' reply; raise any transported exception or I/O error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = truncateTable_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.io is not None:
        raise result.io
    return
def enableTable(self, tableName):
    """
    Enable a table

    Parameters:
     - tableName: the tablename to enable
    """
    self.send_enableTable(tableName)
    self.recv_enableTable()

def send_enableTable(self, tableName):
    """Serialize the 'enableTable' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('enableTable', TMessageType.CALL, self._seqid)
    args = enableTable_args()
    args.tableName = tableName
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_enableTable(self):
    """Read the 'enableTable' reply; raise any transported exception or I/O error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = enableTable_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.io is not None:
        raise result.io
    return

def disableTable(self, tableName):
    """
    Disable a table

    Parameters:
     - tableName: the tablename to disable
    """
    self.send_disableTable(tableName)
    self.recv_disableTable()

def send_disableTable(self, tableName):
    """Serialize the 'disableTable' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('disableTable', TMessageType.CALL, self._seqid)
    args = disableTable_args()
    args.tableName = tableName
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_disableTable(self):
    """Read the 'disableTable' reply; raise any transported exception or I/O error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = disableTable_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.io is not None:
        raise result.io
    return
def isTableEnabled(self, tableName):
    """
    @return true if table is enabled, false if not

    Parameters:
     - tableName: the tablename to check
    """
    self.send_isTableEnabled(tableName)
    return self.recv_isTableEnabled()

def send_isTableEnabled(self, tableName):
    """Serialize the 'isTableEnabled' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('isTableEnabled', TMessageType.CALL, self._seqid)
    args = isTableEnabled_args()
    args.tableName = tableName
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_isTableEnabled(self):
    """Read the 'isTableEnabled' reply and return its result; raise on error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = isTableEnabled_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "isTableEnabled failed: unknown result")

def isTableDisabled(self, tableName):
    """
    @return true if table is disabled, false if not

    Parameters:
     - tableName: the tablename to check
    """
    self.send_isTableDisabled(tableName)
    return self.recv_isTableDisabled()

def send_isTableDisabled(self, tableName):
    """Serialize the 'isTableDisabled' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('isTableDisabled', TMessageType.CALL, self._seqid)
    args = isTableDisabled_args()
    args.tableName = tableName
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_isTableDisabled(self):
    """Read the 'isTableDisabled' reply and return its result; raise on error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = isTableDisabled_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "isTableDisabled failed: unknown result")

def isTableAvailable(self, tableName):
    """
    @return true if table is available, false if not

    Parameters:
     - tableName: the tablename to check
    """
    self.send_isTableAvailable(tableName)
    return self.recv_isTableAvailable()

def send_isTableAvailable(self, tableName):
    """Serialize the 'isTableAvailable' call and its arguments, then flush the transport."""
    self._oprot.writeMessageBegin('isTableAvailable', TMessageType.CALL, self._seqid)
    args = isTableAvailable_args()
    args.tableName = tableName
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_isTableAvailable(self):
    """Read the 'isTableAvailable' reply and return its result; raise on error."""
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        # Server-side framework error: propagate as TApplicationException.
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = isTableAvailable_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.io is not None:
        raise result.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "isTableAvailable failed: unknown result")
def isTableAvailableWithSplit(self, tableName, splitKeys):
"""
* Use this api to check if the table has been created with the specified number of splitkeys
* which was used while creating the given table. Note : If this api is used after a table's
* region gets splitted, the api may return false.
*
* @return true if table is available, false if not
*
Parameters:
- tableName: the tablename to check
- splitKeys: keys to check if the table has been created with all split keys
"""
self.send_isTableAvailableWithSplit(tableName, splitKeys)
return self.recv_isTableAvailableWithSplit()
def send_isTableAvailableWithSplit(self, tableName, splitKeys):
self._oprot.writeMessageBegin('isTableAvailableWithSplit', TMessageType.CALL, self._seqid)
args = isTableAvailableWithSplit_args()
args.tableName = tableName
args.splitKeys = splitKeys
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_isTableAvailableWithSplit(self):
    # Read one response frame: raise any wire-level exception, else decode the result.
    iprot = self._iprot
    fname, mtype, rseqid = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        exc = TApplicationException()
        exc.read(iprot)
        iprot.readMessageEnd()
        raise exc
    reply = isTableAvailableWithSplit_result()
    reply.read(iprot)
    iprot.readMessageEnd()
    if reply.success is not None:
        return reply.success
    if reply.io is not None:
        raise reply.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "isTableAvailableWithSplit failed: unknown result")
def addColumnFamily(self, tableName, column):
    """
    Add a column family to an existing table. Synchronous operation.
    Parameters:
     - tableName: the tablename to add column family to
     - column: column family descriptor of column family to be added
    """
    # Synchronous RPC: send the request frame, then block on the (void) reply.
    self.send_addColumnFamily(tableName, column)
    self.recv_addColumnFamily()
def send_addColumnFamily(self, tableName, column):
    # Serialize an 'addColumnFamily' call frame and flush it to the transport.
    oprot = self._oprot
    oprot.writeMessageBegin('addColumnFamily', TMessageType.CALL, self._seqid)
    call_args = addColumnFamily_args()
    call_args.tableName = tableName
    call_args.column = column
    call_args.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def recv_addColumnFamily(self):
    # Read one (void) response frame; surface any transported exception.
    iprot = self._iprot
    fname, mtype, rseqid = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        exc = TApplicationException()
        exc.read(iprot)
        iprot.readMessageEnd()
        raise exc
    reply = addColumnFamily_result()
    reply.read(iprot)
    iprot.readMessageEnd()
    if reply.io is not None:
        raise reply.io
    return
def deleteColumnFamily(self, tableName, column):
    """
    Delete a column family from a table. Synchronous operation.
    Parameters:
     - tableName: the tablename to delete column family from
     - column: name of column family to be deleted
    """
    # Synchronous RPC: send the request frame, then block on the (void) reply.
    self.send_deleteColumnFamily(tableName, column)
    self.recv_deleteColumnFamily()
def send_deleteColumnFamily(self, tableName, column):
    # Serialize a 'deleteColumnFamily' call frame and flush it to the transport.
    oprot = self._oprot
    oprot.writeMessageBegin('deleteColumnFamily', TMessageType.CALL, self._seqid)
    call_args = deleteColumnFamily_args()
    call_args.tableName = tableName
    call_args.column = column
    call_args.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def recv_deleteColumnFamily(self):
    # Read one (void) response frame; surface any transported exception.
    iprot = self._iprot
    fname, mtype, rseqid = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        exc = TApplicationException()
        exc.read(iprot)
        iprot.readMessageEnd()
        raise exc
    reply = deleteColumnFamily_result()
    reply.read(iprot)
    iprot.readMessageEnd()
    if reply.io is not None:
        raise reply.io
    return
def modifyColumnFamily(self, tableName, column):
    """
    Modify an existing column family on a table. Synchronous operation.
    Parameters:
     - tableName: the tablename to modify column family
     - column: column family descriptor of column family to be modified
    """
    # Synchronous RPC: send the request frame, then block on the (void) reply.
    self.send_modifyColumnFamily(tableName, column)
    self.recv_modifyColumnFamily()
def send_modifyColumnFamily(self, tableName, column):
    # Serialize a 'modifyColumnFamily' call frame and flush it to the transport.
    oprot = self._oprot
    oprot.writeMessageBegin('modifyColumnFamily', TMessageType.CALL, self._seqid)
    call_args = modifyColumnFamily_args()
    call_args.tableName = tableName
    call_args.column = column
    call_args.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def recv_modifyColumnFamily(self):
    # Read one (void) response frame; surface any transported exception.
    iprot = self._iprot
    fname, mtype, rseqid = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        exc = TApplicationException()
        exc.read(iprot)
        iprot.readMessageEnd()
        raise exc
    reply = modifyColumnFamily_result()
    reply.read(iprot)
    iprot.readMessageEnd()
    if reply.io is not None:
        raise reply.io
    return
def modifyTable(self, desc):
    """
    Modify an existing table
    Parameters:
     - desc: the descriptor of the table to modify
    """
    # Synchronous RPC: send the request frame, then block on the (void) reply.
    self.send_modifyTable(desc)
    self.recv_modifyTable()
def send_modifyTable(self, desc):
    # Serialize a 'modifyTable' call frame and flush it to the transport.
    oprot = self._oprot
    oprot.writeMessageBegin('modifyTable', TMessageType.CALL, self._seqid)
    call_args = modifyTable_args()
    call_args.desc = desc
    call_args.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def recv_modifyTable(self):
    # Read one (void) response frame; surface any transported exception.
    iprot = self._iprot
    fname, mtype, rseqid = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        exc = TApplicationException()
        exc.read(iprot)
        iprot.readMessageEnd()
        raise exc
    reply = modifyTable_result()
    reply.read(iprot)
    iprot.readMessageEnd()
    if reply.io is not None:
        raise reply.io
    return
def createNamespace(self, namespaceDesc):
    """
    Create a new namespace. Blocks until namespace has been successfully created or an exception is
    thrown
    Parameters:
     - namespaceDesc: descriptor which describes the new namespace
    """
    # Synchronous RPC: send the request frame, then block on the (void) reply.
    self.send_createNamespace(namespaceDesc)
    self.recv_createNamespace()
def send_createNamespace(self, namespaceDesc):
    # Serialize a 'createNamespace' call frame and flush it to the transport.
    oprot = self._oprot
    oprot.writeMessageBegin('createNamespace', TMessageType.CALL, self._seqid)
    call_args = createNamespace_args()
    call_args.namespaceDesc = namespaceDesc
    call_args.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def recv_createNamespace(self):
    # Read one (void) response frame; surface any transported exception.
    iprot = self._iprot
    fname, mtype, rseqid = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        exc = TApplicationException()
        exc.read(iprot)
        iprot.readMessageEnd()
        raise exc
    reply = createNamespace_result()
    reply.read(iprot)
    iprot.readMessageEnd()
    if reply.io is not None:
        raise reply.io
    return
def modifyNamespace(self, namespaceDesc):
    """
    Modify an existing namespace. Blocks until namespace has been successfully modified or an
    exception is thrown
    Parameters:
     - namespaceDesc: descriptor which describes the new namespace
    """
    # Synchronous RPC: send the request frame, then block on the (void) reply.
    self.send_modifyNamespace(namespaceDesc)
    self.recv_modifyNamespace()
def send_modifyNamespace(self, namespaceDesc):
    # Serialize a 'modifyNamespace' call frame and flush it to the transport.
    oprot = self._oprot
    oprot.writeMessageBegin('modifyNamespace', TMessageType.CALL, self._seqid)
    call_args = modifyNamespace_args()
    call_args.namespaceDesc = namespaceDesc
    call_args.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def recv_modifyNamespace(self):
    # Read one (void) response frame; surface any transported exception.
    iprot = self._iprot
    fname, mtype, rseqid = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        exc = TApplicationException()
        exc.read(iprot)
        iprot.readMessageEnd()
        raise exc
    reply = modifyNamespace_result()
    reply.read(iprot)
    iprot.readMessageEnd()
    if reply.io is not None:
        raise reply.io
    return
def deleteNamespace(self, name):
    """
    Delete an existing namespace. Only empty namespaces (no tables) can be removed.
    Blocks until namespace has been successfully deleted or an
    exception is thrown.
    Parameters:
     - name: namespace name
    """
    # Synchronous RPC: send the request frame, then block on the (void) reply.
    self.send_deleteNamespace(name)
    self.recv_deleteNamespace()
def send_deleteNamespace(self, name):
    # Serialize a 'deleteNamespace' call frame and flush it to the transport.
    oprot = self._oprot
    oprot.writeMessageBegin('deleteNamespace', TMessageType.CALL, self._seqid)
    call_args = deleteNamespace_args()
    call_args.name = name
    call_args.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def recv_deleteNamespace(self):
    # Read one (void) response frame; surface any transported exception.
    iprot = self._iprot
    fname, mtype, rseqid = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        exc = TApplicationException()
        exc.read(iprot)
        iprot.readMessageEnd()
        raise exc
    reply = deleteNamespace_result()
    reply.read(iprot)
    iprot.readMessageEnd()
    if reply.io is not None:
        raise reply.io
    return
def getNamespaceDescriptor(self, name):
    """
    Get a namespace descriptor by name.
    @return the descriptor
    Parameters:
     - name: name of namespace descriptor
    """
    # Synchronous RPC: send the request frame, then block on the reply.
    self.send_getNamespaceDescriptor(name)
    return self.recv_getNamespaceDescriptor()
def send_getNamespaceDescriptor(self, name):
    # Serialize a 'getNamespaceDescriptor' call frame and flush it.
    oprot = self._oprot
    oprot.writeMessageBegin('getNamespaceDescriptor', TMessageType.CALL, self._seqid)
    call_args = getNamespaceDescriptor_args()
    call_args.name = name
    call_args.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def recv_getNamespaceDescriptor(self):
    # Read one response frame: raise any wire-level exception, else decode the result.
    iprot = self._iprot
    fname, mtype, rseqid = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        exc = TApplicationException()
        exc.read(iprot)
        iprot.readMessageEnd()
        raise exc
    reply = getNamespaceDescriptor_result()
    reply.read(iprot)
    iprot.readMessageEnd()
    if reply.success is not None:
        return reply.success
    if reply.io is not None:
        raise reply.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getNamespaceDescriptor failed: unknown result")
def listNamespaceDescriptors(self):
    """
    @return all namespaces
    """
    # Synchronous RPC: send the request frame, then block on the reply.
    self.send_listNamespaceDescriptors()
    return self.recv_listNamespaceDescriptors()
def send_listNamespaceDescriptors(self):
    # Serialize a (parameterless) 'listNamespaceDescriptors' call frame and flush it.
    oprot = self._oprot
    oprot.writeMessageBegin('listNamespaceDescriptors', TMessageType.CALL, self._seqid)
    call_args = listNamespaceDescriptors_args()
    call_args.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def recv_listNamespaceDescriptors(self):
    # Read one response frame: raise any wire-level exception, else decode the result.
    iprot = self._iprot
    fname, mtype, rseqid = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        exc = TApplicationException()
        exc.read(iprot)
        iprot.readMessageEnd()
        raise exc
    reply = listNamespaceDescriptors_result()
    reply.read(iprot)
    iprot.readMessageEnd()
    if reply.success is not None:
        return reply.success
    if reply.io is not None:
        raise reply.io
    raise TApplicationException(TApplicationException.MISSING_RESULT, "listNamespaceDescriptors failed: unknown result")
class Processor(Iface, TProcessor):
def __init__(self, handler):
    """Bind *handler* (the service implementation) and build the dispatch table."""
    self._handler = handler
    # Map each wire-level method name to its process_* dispatcher; equivalent
    # to the generated one-assignment-per-method form.
    self._processMap = {
        method: getattr(Processor, 'process_' + method)
        for method in (
            "exists", "existsAll", "get", "getMultiple", "put",
            "checkAndPut", "putMultiple", "deleteSingle", "deleteMultiple",
            "checkAndDelete", "increment", "append", "openScanner",
            "getScannerRows", "closeScanner", "mutateRow",
            "getScannerResults", "getRegionLocation",
            "getAllRegionLocations", "checkAndMutate",
            "getTableDescriptor", "getTableDescriptors", "tableExists",
            "getTableDescriptorsByPattern", "getTableDescriptorsByNamespace",
            "getTableNamesByPattern", "getTableNamesByNamespace",
            "createTable", "deleteTable", "truncateTable", "enableTable",
            "disableTable", "isTableEnabled", "isTableDisabled",
            "isTableAvailable", "isTableAvailableWithSplit",
            "addColumnFamily", "deleteColumnFamily", "modifyColumnFamily",
            "modifyTable", "createNamespace", "modifyNamespace",
            "deleteNamespace", "getNamespaceDescriptor",
            "listNamespaceDescriptors",
        )
    }
def process(self, iprot, oprot):
    """Read one request frame and dispatch it to the matching process_* method."""
    name, msg_type, seqid = iprot.readMessageBegin()
    dispatcher = self._processMap.get(name)
    if dispatcher is None:
        # Unknown method: drain the request body and answer with an exception frame.
        iprot.skip(TType.STRUCT)
        iprot.readMessageEnd()
        exc = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
        oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
        exc.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
        return
    dispatcher(self, seqid, iprot, oprot)
    return True
def process_exists(self, seqid, iprot, oprot):
    """Service one 'exists' request and write the response frame."""
    call_args = exists_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = exists_result()
    try:
        reply.success = self._handler.exists(call_args.table, call_args.tget)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("exists", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_existsAll(self, seqid, iprot, oprot):
    """Service one 'existsAll' request and write the response frame."""
    call_args = existsAll_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = existsAll_result()
    try:
        reply.success = self._handler.existsAll(call_args.table, call_args.tgets)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("existsAll", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_get(self, seqid, iprot, oprot):
    """Service one 'get' request and write the response frame."""
    call_args = get_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = get_result()
    try:
        reply.success = self._handler.get(call_args.table, call_args.tget)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("get", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getMultiple(self, seqid, iprot, oprot):
    """Service one 'getMultiple' request and write the response frame."""
    call_args = getMultiple_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = getMultiple_result()
    try:
        reply.success = self._handler.getMultiple(call_args.table, call_args.tgets)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getMultiple", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_put(self, seqid, iprot, oprot):
    """Service one (void) 'put' request and write the response frame."""
    call_args = put_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = put_result()
    try:
        self._handler.put(call_args.table, call_args.tput)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("put", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_checkAndPut(self, seqid, iprot, oprot):
    """Service one 'checkAndPut' request and write the response frame."""
    call_args = checkAndPut_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = checkAndPut_result()
    try:
        reply.success = self._handler.checkAndPut(call_args.table, call_args.row, call_args.family, call_args.qualifier, call_args.value, call_args.tput)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("checkAndPut", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_putMultiple(self, seqid, iprot, oprot):
    """Service one (void) 'putMultiple' request and write the response frame."""
    call_args = putMultiple_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = putMultiple_result()
    try:
        self._handler.putMultiple(call_args.table, call_args.tputs)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("putMultiple", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_deleteSingle(self, seqid, iprot, oprot):
    """Service one (void) 'deleteSingle' request and write the response frame."""
    call_args = deleteSingle_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = deleteSingle_result()
    try:
        self._handler.deleteSingle(call_args.table, call_args.tdelete)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("deleteSingle", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_deleteMultiple(self, seqid, iprot, oprot):
    """Service one 'deleteMultiple' request and write the response frame."""
    call_args = deleteMultiple_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = deleteMultiple_result()
    try:
        reply.success = self._handler.deleteMultiple(call_args.table, call_args.tdeletes)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("deleteMultiple", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_checkAndDelete(self, seqid, iprot, oprot):
    """Service one 'checkAndDelete' request and write the response frame."""
    call_args = checkAndDelete_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = checkAndDelete_result()
    try:
        reply.success = self._handler.checkAndDelete(call_args.table, call_args.row, call_args.family, call_args.qualifier, call_args.value, call_args.tdelete)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("checkAndDelete", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_increment(self, seqid, iprot, oprot):
    """Service one 'increment' request and write the response frame."""
    call_args = increment_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = increment_result()
    try:
        reply.success = self._handler.increment(call_args.table, call_args.tincrement)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("increment", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_append(self, seqid, iprot, oprot):
    """Service one 'append' request and write the response frame."""
    call_args = append_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = append_result()
    try:
        reply.success = self._handler.append(call_args.table, call_args.tappend)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("append", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_openScanner(self, seqid, iprot, oprot):
    """Service one 'openScanner' request and write the response frame."""
    call_args = openScanner_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = openScanner_result()
    try:
        reply.success = self._handler.openScanner(call_args.table, call_args.tscan)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("openScanner", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getScannerRows(self, seqid, iprot, oprot):
    """Service one 'getScannerRows' request and write the response frame."""
    call_args = getScannerRows_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = getScannerRows_result()
    try:
        reply.success = self._handler.getScannerRows(call_args.scannerId, call_args.numRows)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TIllegalArgument as ia:
        # e.g. an unknown scanner id; carried back as a declared exception field.
        msg_type = TMessageType.REPLY
        reply.ia = ia
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getScannerRows", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_closeScanner(self, seqid, iprot, oprot):
    """Service one (void) 'closeScanner' request and write the response frame."""
    call_args = closeScanner_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = closeScanner_result()
    try:
        self._handler.closeScanner(call_args.scannerId)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TIllegalArgument as ia:
        # e.g. an unknown scanner id; carried back as a declared exception field.
        msg_type = TMessageType.REPLY
        reply.ia = ia
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("closeScanner", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_mutateRow(self, seqid, iprot, oprot):
    """Service one (void) 'mutateRow' request and write the response frame."""
    call_args = mutateRow_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = mutateRow_result()
    try:
        self._handler.mutateRow(call_args.table, call_args.trowMutations)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("mutateRow", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getScannerResults(self, seqid, iprot, oprot):
    """Service one 'getScannerResults' request and write the response frame."""
    call_args = getScannerResults_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = getScannerResults_result()
    try:
        reply.success = self._handler.getScannerResults(call_args.table, call_args.tscan, call_args.numRows)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getScannerResults", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getRegionLocation(self, seqid, iprot, oprot):
    """Service one 'getRegionLocation' request and write the response frame."""
    call_args = getRegionLocation_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = getRegionLocation_result()
    try:
        reply.success = self._handler.getRegionLocation(call_args.table, call_args.row, call_args.reload)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getRegionLocation", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getAllRegionLocations(self, seqid, iprot, oprot):
    """Service one 'getAllRegionLocations' request and write the response frame."""
    call_args = getAllRegionLocations_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = getAllRegionLocations_result()
    try:
        reply.success = self._handler.getAllRegionLocations(call_args.table)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getAllRegionLocations", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_checkAndMutate(self, seqid, iprot, oprot):
    """Service one 'checkAndMutate' request and write the response frame."""
    call_args = checkAndMutate_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = checkAndMutate_result()
    try:
        reply.success = self._handler.checkAndMutate(call_args.table, call_args.row, call_args.family, call_args.qualifier, call_args.compareOp, call_args.value, call_args.rowMutations)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("checkAndMutate", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getTableDescriptor(self, seqid, iprot, oprot):
    """Service one 'getTableDescriptor' request and write the response frame."""
    call_args = getTableDescriptor_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = getTableDescriptor_result()
    try:
        reply.success = self._handler.getTableDescriptor(call_args.table)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getTableDescriptor", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getTableDescriptors(self, seqid, iprot, oprot):
    """Service one 'getTableDescriptors' request and write the response frame."""
    call_args = getTableDescriptors_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = getTableDescriptors_result()
    try:
        reply.success = self._handler.getTableDescriptors(call_args.tables)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getTableDescriptors", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_tableExists(self, seqid, iprot, oprot):
    """Service one 'tableExists' request and write the response frame."""
    call_args = tableExists_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = tableExists_result()
    try:
        reply.success = self._handler.tableExists(call_args.tableName)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("tableExists", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getTableDescriptorsByPattern(self, seqid, iprot, oprot):
    """Service one 'getTableDescriptorsByPattern' request and write the response frame."""
    call_args = getTableDescriptorsByPattern_args()
    call_args.read(iprot)
    iprot.readMessageEnd()
    reply = getTableDescriptorsByPattern_result()
    try:
        reply.success = self._handler.getTableDescriptorsByPattern(call_args.regex, call_args.includeSysTables)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport is broken; nothing sensible can be written back.
        raise
    except TIOError as io:
        msg_type = TMessageType.REPLY
        reply.io = io
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getTableDescriptorsByPattern", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getTableDescriptorsByNamespace(self, seqid, iprot, oprot):
args = getTableDescriptorsByNamespace_args()
args.read(iprot)
iprot.readMessageEnd()
result = getTableDescriptorsByNamespace_result()
try:
result.success = self._handler.getTableDescriptorsByNamespace(args.name)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getTableDescriptorsByNamespace", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getTableNamesByPattern(self, seqid, iprot, oprot):
args = getTableNamesByPattern_args()
args.read(iprot)
iprot.readMessageEnd()
result = getTableNamesByPattern_result()
try:
result.success = self._handler.getTableNamesByPattern(args.regex, args.includeSysTables)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getTableNamesByPattern", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getTableNamesByNamespace(self, seqid, iprot, oprot):
args = getTableNamesByNamespace_args()
args.read(iprot)
iprot.readMessageEnd()
result = getTableNamesByNamespace_result()
try:
result.success = self._handler.getTableNamesByNamespace(args.name)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getTableNamesByNamespace", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_createTable(self, seqid, iprot, oprot):
args = createTable_args()
args.read(iprot)
iprot.readMessageEnd()
result = createTable_result()
try:
self._handler.createTable(args.desc, args.splitKeys)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("createTable", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteTable(self, seqid, iprot, oprot):
args = deleteTable_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteTable_result()
try:
self._handler.deleteTable(args.tableName)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("deleteTable", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_truncateTable(self, seqid, iprot, oprot):
args = truncateTable_args()
args.read(iprot)
iprot.readMessageEnd()
result = truncateTable_result()
try:
self._handler.truncateTable(args.tableName, args.preserveSplits)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("truncateTable", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_enableTable(self, seqid, iprot, oprot):
args = enableTable_args()
args.read(iprot)
iprot.readMessageEnd()
result = enableTable_result()
try:
self._handler.enableTable(args.tableName)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("enableTable", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_disableTable(self, seqid, iprot, oprot):
args = disableTable_args()
args.read(iprot)
iprot.readMessageEnd()
result = disableTable_result()
try:
self._handler.disableTable(args.tableName)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("disableTable", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_isTableEnabled(self, seqid, iprot, oprot):
args = isTableEnabled_args()
args.read(iprot)
iprot.readMessageEnd()
result = isTableEnabled_result()
try:
result.success = self._handler.isTableEnabled(args.tableName)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("isTableEnabled", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_isTableDisabled(self, seqid, iprot, oprot):
args = isTableDisabled_args()
args.read(iprot)
iprot.readMessageEnd()
result = isTableDisabled_result()
try:
result.success = self._handler.isTableDisabled(args.tableName)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("isTableDisabled", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_isTableAvailable(self, seqid, iprot, oprot):
args = isTableAvailable_args()
args.read(iprot)
iprot.readMessageEnd()
result = isTableAvailable_result()
try:
result.success = self._handler.isTableAvailable(args.tableName)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("isTableAvailable", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_isTableAvailableWithSplit(self, seqid, iprot, oprot):
args = isTableAvailableWithSplit_args()
args.read(iprot)
iprot.readMessageEnd()
result = isTableAvailableWithSplit_result()
try:
result.success = self._handler.isTableAvailableWithSplit(args.tableName, args.splitKeys)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("isTableAvailableWithSplit", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_addColumnFamily(self, seqid, iprot, oprot):
args = addColumnFamily_args()
args.read(iprot)
iprot.readMessageEnd()
result = addColumnFamily_result()
try:
self._handler.addColumnFamily(args.tableName, args.column)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("addColumnFamily", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteColumnFamily(self, seqid, iprot, oprot):
args = deleteColumnFamily_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteColumnFamily_result()
try:
self._handler.deleteColumnFamily(args.tableName, args.column)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("deleteColumnFamily", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_modifyColumnFamily(self, seqid, iprot, oprot):
args = modifyColumnFamily_args()
args.read(iprot)
iprot.readMessageEnd()
result = modifyColumnFamily_result()
try:
self._handler.modifyColumnFamily(args.tableName, args.column)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("modifyColumnFamily", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_modifyTable(self, seqid, iprot, oprot):
args = modifyTable_args()
args.read(iprot)
iprot.readMessageEnd()
result = modifyTable_result()
try:
self._handler.modifyTable(args.desc)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("modifyTable", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_createNamespace(self, seqid, iprot, oprot):
args = createNamespace_args()
args.read(iprot)
iprot.readMessageEnd()
result = createNamespace_result()
try:
self._handler.createNamespace(args.namespaceDesc)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("createNamespace", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_modifyNamespace(self, seqid, iprot, oprot):
args = modifyNamespace_args()
args.read(iprot)
iprot.readMessageEnd()
result = modifyNamespace_result()
try:
self._handler.modifyNamespace(args.namespaceDesc)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("modifyNamespace", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteNamespace(self, seqid, iprot, oprot):
args = deleteNamespace_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteNamespace_result()
try:
self._handler.deleteNamespace(args.name)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("deleteNamespace", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getNamespaceDescriptor(self, seqid, iprot, oprot):
args = getNamespaceDescriptor_args()
args.read(iprot)
iprot.readMessageEnd()
result = getNamespaceDescriptor_result()
try:
result.success = self._handler.getNamespaceDescriptor(args.name)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getNamespaceDescriptor", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_listNamespaceDescriptors(self, seqid, iprot, oprot):
args = listNamespaceDescriptors_args()
args.read(iprot)
iprot.readMessageEnd()
result = listNamespaceDescriptors_result()
try:
result.success = self._handler.listNamespaceDescriptors()
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("listNamespaceDescriptors", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
# Thrift argument wrapper for the exists(table, tget) RPC.
class exists_args(object):
    """
    Attributes:
     - table: the table to check on
     - tget: the TGet to check for
    """
    def __init__(self, table=None, tget=None,):
        self.table = table
        self.tget = tget
    def read(self, iprot):
        # Fast path: C-accelerated decode driven by thrift_spec, when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: walk fields until STOP, skipping unknown ids/mismatched types.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.table = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.tget = TGet()
                    self.tget.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated encode straight from thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('exists_args')
        # None fields are omitted from the wire entirely.
        if self.table is not None:
            oprot.writeFieldBegin('table', TType.STRING, 1)
            oprot.writeBinary(self.table)
            oprot.writeFieldEnd()
        if self.tget is not None:
            oprot.writeFieldBegin('tget', TType.STRUCT, 2)
            self.tget.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Both fields are declared required in the IDL.
        if self.table is None:
            raise TProtocolException(message='Required field table is unset!')
        if self.tget is None:
            raise TProtocolException(message='Required field tget is unset!')
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
all_structs.append(exists_args)
# Field spec consumed by the fast C encoder/decoder: (fid, type, name, type-args, default).
exists_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'table', 'BINARY', None, ),  # 1
    (2, TType.STRUCT, 'tget', [TGet, None], None, ),  # 2
)
# Thrift result wrapper for the exists RPC: bool success or a TIOError.
class exists_result(object):
    """
    Attributes:
     - success
     - io
    """
    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io
    def read(self, iprot):
        # Fast path: C-accelerated decode driven by thrift_spec, when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: field id 0 is the return value, id 1 the declared exception.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.BOOL:
                    self.success = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated encode straight from thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('exists_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.BOOL, 0)
            oprot.writeBool(self.success)
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Result structs have no required fields.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
all_structs.append(exists_result)
# Field spec consumed by the fast C encoder/decoder.
exists_result.thrift_spec = (
    (0, TType.BOOL, 'success', None, None, ),  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
# Thrift argument wrapper for the existsAll(table, tgets) RPC.
class existsAll_args(object):
    """
    Attributes:
     - table: the table to check on
     - tgets: a list of TGets to check for
    """
    def __init__(self, table=None, tgets=None,):
        self.table = table
        self.tgets = tgets
    def read(self, iprot):
        # Fast path: C-accelerated decode driven by thrift_spec, when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: walk fields until STOP, skipping unknown ids/mismatched types.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.table = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    # Element-by-element decode of the TGet list.
                    self.tgets = []
                    (_etype172, _size169) = iprot.readListBegin()
                    for _i173 in range(_size169):
                        _elem174 = TGet()
                        _elem174.read(iprot)
                        self.tgets.append(_elem174)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated encode straight from thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('existsAll_args')
        if self.table is not None:
            oprot.writeFieldBegin('table', TType.STRING, 1)
            oprot.writeBinary(self.table)
            oprot.writeFieldEnd()
        if self.tgets is not None:
            oprot.writeFieldBegin('tgets', TType.LIST, 2)
            oprot.writeListBegin(TType.STRUCT, len(self.tgets))
            for iter175 in self.tgets:
                iter175.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Both fields are declared required in the IDL.
        if self.table is None:
            raise TProtocolException(message='Required field table is unset!')
        if self.tgets is None:
            raise TProtocolException(message='Required field tgets is unset!')
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
all_structs.append(existsAll_args)
# Field spec consumed by the fast C encoder/decoder.
existsAll_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'table', 'BINARY', None, ),  # 1
    (2, TType.LIST, 'tgets', (TType.STRUCT, [TGet, None], False), None, ),  # 2
)
# Thrift result wrapper for the existsAll RPC: list of bools or a TIOError.
class existsAll_result(object):
    """
    Attributes:
     - success
     - io
    """
    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io
    def read(self, iprot):
        # Fast path: C-accelerated decode driven by thrift_spec, when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: field id 0 is the return value, id 1 the declared exception.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype179, _size176) = iprot.readListBegin()
                    for _i180 in range(_size176):
                        _elem181 = iprot.readBool()
                        self.success.append(_elem181)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated encode straight from thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('existsAll_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.BOOL, len(self.success))
            for iter182 in self.success:
                oprot.writeBool(iter182)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Result structs have no required fields.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
all_structs.append(existsAll_result)
# Field spec consumed by the fast C encoder/decoder.
existsAll_result.thrift_spec = (
    (0, TType.LIST, 'success', (TType.BOOL, None, False), None, ),  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
# Thrift argument wrapper for the get(table, tget) RPC.
class get_args(object):
    """
    Attributes:
     - table: the table to get from
     - tget: the TGet to fetch
    """
    def __init__(self, table=None, tget=None,):
        self.table = table
        self.tget = tget
    def read(self, iprot):
        # Fast path: C-accelerated decode driven by thrift_spec, when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: walk fields until STOP, skipping unknown ids/mismatched types.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.table = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.tget = TGet()
                    self.tget.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated encode straight from thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('get_args')
        if self.table is not None:
            oprot.writeFieldBegin('table', TType.STRING, 1)
            oprot.writeBinary(self.table)
            oprot.writeFieldEnd()
        if self.tget is not None:
            oprot.writeFieldBegin('tget', TType.STRUCT, 2)
            self.tget.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Both fields are declared required in the IDL.
        if self.table is None:
            raise TProtocolException(message='Required field table is unset!')
        if self.tget is None:
            raise TProtocolException(message='Required field tget is unset!')
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
all_structs.append(get_args)
# Field spec consumed by the fast C encoder/decoder.
get_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'table', 'BINARY', None, ),  # 1
    (2, TType.STRUCT, 'tget', [TGet, None], None, ),  # 2
)
# Thrift result wrapper for the get RPC: a TResult or a TIOError.
class get_result(object):
    """
    Attributes:
     - success
     - io
    """
    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io
    def read(self, iprot):
        # Fast path: C-accelerated decode driven by thrift_spec, when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: field id 0 is the return value, id 1 the declared exception.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = TResult()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated encode straight from thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('get_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Result structs have no required fields.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
all_structs.append(get_result)
# Field spec consumed by the fast C encoder/decoder.
get_result.thrift_spec = (
    (0, TType.STRUCT, 'success', [TResult, None], None, ),  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
# Thrift argument wrapper for the getMultiple(table, tgets) RPC.
class getMultiple_args(object):
    """
    Attributes:
     - table: the table to get from
     - tgets: a list of TGets to fetch, the Result list
    will have the Results at corresponding positions
    or null if there was an error
    """
    def __init__(self, table=None, tgets=None,):
        self.table = table
        self.tgets = tgets
    def read(self, iprot):
        # Fast path: C-accelerated decode driven by thrift_spec, when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: walk fields until STOP, skipping unknown ids/mismatched types.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.table = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    # Element-by-element decode of the TGet list.
                    self.tgets = []
                    (_etype186, _size183) = iprot.readListBegin()
                    for _i187 in range(_size183):
                        _elem188 = TGet()
                        _elem188.read(iprot)
                        self.tgets.append(_elem188)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated encode straight from thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getMultiple_args')
        if self.table is not None:
            oprot.writeFieldBegin('table', TType.STRING, 1)
            oprot.writeBinary(self.table)
            oprot.writeFieldEnd()
        if self.tgets is not None:
            oprot.writeFieldBegin('tgets', TType.LIST, 2)
            oprot.writeListBegin(TType.STRUCT, len(self.tgets))
            for iter189 in self.tgets:
                iter189.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Both fields are declared required in the IDL.
        if self.table is None:
            raise TProtocolException(message='Required field table is unset!')
        if self.tgets is None:
            raise TProtocolException(message='Required field tgets is unset!')
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
all_structs.append(getMultiple_args)
# Field spec consumed by the fast C encoder/decoder.
getMultiple_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'table', 'BINARY', None, ),  # 1
    (2, TType.LIST, 'tgets', (TType.STRUCT, [TGet, None], False), None, ),  # 2
)
# Thrift result wrapper for the getMultiple RPC: list of TResult or a TIOError.
class getMultiple_result(object):
    """
    Attributes:
     - success
     - io
    """
    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io
    def read(self, iprot):
        # Fast path: C-accelerated decode driven by thrift_spec, when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: field id 0 is the return value, id 1 the declared exception.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype193, _size190) = iprot.readListBegin()
                    for _i194 in range(_size190):
                        _elem195 = TResult()
                        _elem195.read(iprot)
                        self.success.append(_elem195)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated encode straight from thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getMultiple_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for iter196 in self.success:
                iter196.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Result structs have no required fields.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
all_structs.append(getMultiple_result)
# Field spec consumed by the fast C encoder/decoder.
getMultiple_result.thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT, [TResult, None], False), None, ),  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class put_args(object):
"""
Attributes:
- table: the table to put data in
- tput: the TPut to put
"""
def __init__(self, table=None, tput=None,):
self.table = table
self.tput = tput
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.tput = TPut()
self.tput.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('put_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeBinary(self.table)
oprot.writeFieldEnd()
if self.tput is not None:
oprot.writeFieldBegin('tput', TType.STRUCT, 2)
self.tput.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocolException(message='Required field table is unset!')
if self.tput is None:
raise TProtocolException(message='Required field tput is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(put_args)
put_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.STRUCT, 'tput', [TPut, None], None, ), # 2
)
class put_result(object):
"""
Attributes:
- io
"""
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('put_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(put_result)
put_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class checkAndPut_args(object):
"""
Attributes:
- table: to check in and put to
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value, if not provided the
check is for the non-existence of the
column in question
- tput: the TPut to put if the check succeeds
"""
def __init__(self, table=None, row=None, family=None, qualifier=None, value=None, tput=None,):
self.table = table
self.row = row
self.family = family
self.qualifier = qualifier
self.value = value
self.tput = tput
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.family = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.qualifier = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.value = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
self.tput = TPut()
self.tput.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('checkAndPut_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeBinary(self.table)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeBinary(self.row)
oprot.writeFieldEnd()
if self.family is not None:
oprot.writeFieldBegin('family', TType.STRING, 3)
oprot.writeBinary(self.family)
oprot.writeFieldEnd()
if self.qualifier is not None:
oprot.writeFieldBegin('qualifier', TType.STRING, 4)
oprot.writeBinary(self.qualifier)
oprot.writeFieldEnd()
if self.value is not None:
oprot.writeFieldBegin('value', TType.STRING, 5)
oprot.writeBinary(self.value)
oprot.writeFieldEnd()
if self.tput is not None:
oprot.writeFieldBegin('tput', TType.STRUCT, 6)
self.tput.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocolException(message='Required field table is unset!')
if self.row is None:
raise TProtocolException(message='Required field row is unset!')
if self.family is None:
raise TProtocolException(message='Required field family is unset!')
if self.qualifier is None:
raise TProtocolException(message='Required field qualifier is unset!')
if self.tput is None:
raise TProtocolException(message='Required field tput is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(checkAndPut_args)
checkAndPut_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.STRING, 'row', 'BINARY', None, ), # 2
(3, TType.STRING, 'family', 'BINARY', None, ), # 3
(4, TType.STRING, 'qualifier', 'BINARY', None, ), # 4
(5, TType.STRING, 'value', 'BINARY', None, ), # 5
(6, TType.STRUCT, 'tput', [TPut, None], None, ), # 6
)
class checkAndPut_result(object):
"""
Attributes:
- success
- io
"""
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('checkAndPut_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(checkAndPut_result)
checkAndPut_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class putMultiple_args(object):
"""
Attributes:
- table: the table to put data in
- tputs: a list of TPuts to commit
"""
def __init__(self, table=None, tputs=None,):
self.table = table
self.tputs = tputs
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.tputs = []
(_etype200, _size197) = iprot.readListBegin()
for _i201 in range(_size197):
_elem202 = TPut()
_elem202.read(iprot)
self.tputs.append(_elem202)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('putMultiple_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeBinary(self.table)
oprot.writeFieldEnd()
if self.tputs is not None:
oprot.writeFieldBegin('tputs', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.tputs))
for iter203 in self.tputs:
iter203.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocolException(message='Required field table is unset!')
if self.tputs is None:
raise TProtocolException(message='Required field tputs is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(putMultiple_args)
putMultiple_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.LIST, 'tputs', (TType.STRUCT, [TPut, None], False), None, ), # 2
)
class putMultiple_result(object):
"""
Attributes:
- io
"""
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('putMultiple_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(putMultiple_result)
putMultiple_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class deleteSingle_args(object):
"""
Attributes:
- table: the table to delete from
- tdelete: the TDelete to delete
"""
def __init__(self, table=None, tdelete=None,):
self.table = table
self.tdelete = tdelete
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.tdelete = TDelete()
self.tdelete.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('deleteSingle_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeBinary(self.table)
oprot.writeFieldEnd()
if self.tdelete is not None:
oprot.writeFieldBegin('tdelete', TType.STRUCT, 2)
self.tdelete.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocolException(message='Required field table is unset!')
if self.tdelete is None:
raise TProtocolException(message='Required field tdelete is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(deleteSingle_args)
deleteSingle_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.STRUCT, 'tdelete', [TDelete, None], None, ), # 2
)
class deleteSingle_result(object):
"""
Attributes:
- io
"""
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('deleteSingle_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(deleteSingle_result)
deleteSingle_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class deleteMultiple_args(object):
"""
Attributes:
- table: the table to delete from
- tdeletes: list of TDeletes to delete
"""
def __init__(self, table=None, tdeletes=None,):
self.table = table
self.tdeletes = tdeletes
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.tdeletes = []
(_etype207, _size204) = iprot.readListBegin()
for _i208 in range(_size204):
_elem209 = TDelete()
_elem209.read(iprot)
self.tdeletes.append(_elem209)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('deleteMultiple_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeBinary(self.table)
oprot.writeFieldEnd()
if self.tdeletes is not None:
oprot.writeFieldBegin('tdeletes', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.tdeletes))
for iter210 in self.tdeletes:
iter210.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocolException(message='Required field table is unset!')
if self.tdeletes is None:
raise TProtocolException(message='Required field tdeletes is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(deleteMultiple_args)
deleteMultiple_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.LIST, 'tdeletes', (TType.STRUCT, [TDelete, None], False), None, ), # 2
)
class deleteMultiple_result(object):
"""
Attributes:
- success
- io
"""
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype214, _size211) = iprot.readListBegin()
for _i215 in range(_size211):
_elem216 = TDelete()
_elem216.read(iprot)
self.success.append(_elem216)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('deleteMultiple_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter217 in self.success:
iter217.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(deleteMultiple_result)
deleteMultiple_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, [TDelete, None], False), None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class checkAndDelete_args(object):
"""
Attributes:
- table: to check in and delete from
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value, if not provided the
check is for the non-existence of the
column in question
- tdelete: the TDelete to execute if the check succeeds
"""
def __init__(self, table=None, row=None, family=None, qualifier=None, value=None, tdelete=None,):
self.table = table
self.row = row
self.family = family
self.qualifier = qualifier
self.value = value
self.tdelete = tdelete
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.family = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.qualifier = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.value = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
self.tdelete = TDelete()
self.tdelete.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('checkAndDelete_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeBinary(self.table)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeBinary(self.row)
oprot.writeFieldEnd()
if self.family is not None:
oprot.writeFieldBegin('family', TType.STRING, 3)
oprot.writeBinary(self.family)
oprot.writeFieldEnd()
if self.qualifier is not None:
oprot.writeFieldBegin('qualifier', TType.STRING, 4)
oprot.writeBinary(self.qualifier)
oprot.writeFieldEnd()
if self.value is not None:
oprot.writeFieldBegin('value', TType.STRING, 5)
oprot.writeBinary(self.value)
oprot.writeFieldEnd()
if self.tdelete is not None:
oprot.writeFieldBegin('tdelete', TType.STRUCT, 6)
self.tdelete.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocolException(message='Required field table is unset!')
if self.row is None:
raise TProtocolException(message='Required field row is unset!')
if self.family is None:
raise TProtocolException(message='Required field family is unset!')
if self.qualifier is None:
raise TProtocolException(message='Required field qualifier is unset!')
if self.tdelete is None:
raise TProtocolException(message='Required field tdelete is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(checkAndDelete_args)
checkAndDelete_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.STRING, 'row', 'BINARY', None, ), # 2
(3, TType.STRING, 'family', 'BINARY', None, ), # 3
(4, TType.STRING, 'qualifier', 'BINARY', None, ), # 4
(5, TType.STRING, 'value', 'BINARY', None, ), # 5
(6, TType.STRUCT, 'tdelete', [TDelete, None], None, ), # 6
)
class checkAndDelete_result(object):
"""
Attributes:
- success
- io
"""
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('checkAndDelete_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(checkAndDelete_result)
checkAndDelete_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class increment_args(object):
"""
Attributes:
- table: the table to increment the value on
- tincrement: the TIncrement to increment
"""
def __init__(self, table=None, tincrement=None,):
self.table = table
self.tincrement = tincrement
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.tincrement = TIncrement()
self.tincrement.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('increment_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeBinary(self.table)
oprot.writeFieldEnd()
if self.tincrement is not None:
oprot.writeFieldBegin('tincrement', TType.STRUCT, 2)
self.tincrement.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocolException(message='Required field table is unset!')
if self.tincrement is None:
raise TProtocolException(message='Required field tincrement is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(increment_args)
increment_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.STRUCT, 'tincrement', [TIncrement, None], None, ), # 2
)
class increment_result(object):
"""
Attributes:
- success
- io
"""
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TResult()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('increment_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(increment_result)
increment_result.thrift_spec = (
(0, TType.STRUCT, 'success', [TResult, None], None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class append_args(object):
    """Generated Thrift argument struct for the ``append`` RPC.

    Attributes:
     - table: the table to append the value on (binary, required)
     - tappend: the TAppend to append (required)
    """

    def __init__(self, table=None, tappend=None,):
        self.table = table
        self.tappend = tappend

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        # Fast path: C-accelerated decode when the protocol and transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Generic decode loop: fields matched by id; unknown fields skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.table = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.tappend = TAppend()
                    self.tappend.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None-valued fields are omitted."""
        # Fast path: C-accelerated encode when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('append_args')
        if self.table is not None:
            oprot.writeFieldBegin('table', TType.STRING, 1)
            oprot.writeBinary(self.table)
            oprot.writeFieldEnd()
        if self.tappend is not None:
            oprot.writeFieldBegin('tappend', TType.STRUCT, 2)
            self.tappend.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Both fields are marked required in the IDL.
        if self.table is None:
            raise TProtocolException(message='Required field table is unset!')
        if self.tappend is None:
            raise TProtocolException(message='Required field tappend is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Collect the struct and declare its wire spec:
# (field id, wire type, name, type args, default value).
all_structs.append(append_args)
append_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'table', 'BINARY', None, ),  # 1
    (2, TType.STRUCT, 'tappend', [TAppend, None], None, ),  # 2
)
class append_result(object):
    """Generated Thrift result struct for the ``append`` RPC.

    Attributes:
     - success: TResult returned on success (field id 0)
     - io: TIOError raised by the server, if any (field id 1)
    """

    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        # Fast path: C-accelerated decode when the protocol and transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Generic decode loop: fields matched by id; unknown fields skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = TResult()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None-valued fields are omitted."""
        # Fast path: C-accelerated encode when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('append_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields on a result struct.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Collect the struct and declare its wire spec:
# (field id, wire type, name, type args, default value).
all_structs.append(append_result)
append_result.thrift_spec = (
    (0, TType.STRUCT, 'success', [TResult, None], None, ),  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class openScanner_args(object):
    """Generated Thrift argument struct for the ``openScanner`` RPC.

    Attributes:
     - table: the table to get the Scanner for (binary, required)
     - tscan: the scan object to get a Scanner for (required)
    """

    def __init__(self, table=None, tscan=None,):
        self.table = table
        self.tscan = tscan

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        # Fast path: C-accelerated decode when the protocol and transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Generic decode loop: fields matched by id; unknown fields skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.table = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.tscan = TScan()
                    self.tscan.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None-valued fields are omitted."""
        # Fast path: C-accelerated encode when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('openScanner_args')
        if self.table is not None:
            oprot.writeFieldBegin('table', TType.STRING, 1)
            oprot.writeBinary(self.table)
            oprot.writeFieldEnd()
        if self.tscan is not None:
            oprot.writeFieldBegin('tscan', TType.STRUCT, 2)
            self.tscan.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Both fields are marked required in the IDL.
        if self.table is None:
            raise TProtocolException(message='Required field table is unset!')
        if self.tscan is None:
            raise TProtocolException(message='Required field tscan is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Collect the struct and declare its wire spec:
# (field id, wire type, name, type args, default value).
all_structs.append(openScanner_args)
openScanner_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'table', 'BINARY', None, ),  # 1
    (2, TType.STRUCT, 'tscan', [TScan, None], None, ),  # 2
)
class openScanner_result(object):
    """Generated Thrift result struct for the ``openScanner`` RPC.

    Attributes:
     - success: the scanner id (i32) returned on success (field id 0)
     - io: TIOError raised by the server, if any (field id 1)
    """

    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        # Fast path: C-accelerated decode when the protocol and transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Generic decode loop: fields matched by id; unknown fields skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.I32:
                    self.success = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None-valued fields are omitted."""
        # Fast path: C-accelerated encode when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('openScanner_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.I32, 0)
            oprot.writeI32(self.success)
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields on a result struct.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Collect the struct and declare its wire spec:
# (field id, wire type, name, type args, default value).
all_structs.append(openScanner_result)
openScanner_result.thrift_spec = (
    (0, TType.I32, 'success', None, None, ),  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class getScannerRows_args(object):
    """Generated Thrift argument struct for the ``getScannerRows`` RPC.

    Attributes:
     - scannerId: the Id of the Scanner to return rows from. This is an Id
       returned from the openScanner function. (i32, required)
     - numRows: number of rows to return (i32, defaults to 1)
    """

    def __init__(self, scannerId=None, numRows=1,):
        self.scannerId = scannerId
        self.numRows = numRows

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        # Fast path: C-accelerated decode when the protocol and transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Generic decode loop: fields matched by id; unknown fields skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.scannerId = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I32:
                    self.numRows = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None-valued fields are omitted."""
        # Fast path: C-accelerated encode when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getScannerRows_args')
        if self.scannerId is not None:
            oprot.writeFieldBegin('scannerId', TType.I32, 1)
            oprot.writeI32(self.scannerId)
            oprot.writeFieldEnd()
        if self.numRows is not None:
            oprot.writeFieldBegin('numRows', TType.I32, 2)
            oprot.writeI32(self.numRows)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # scannerId is required; numRows has a default and may be unset.
        if self.scannerId is None:
            raise TProtocolException(message='Required field scannerId is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Collect the struct and declare its wire spec:
# (field id, wire type, name, type args, default value).
all_structs.append(getScannerRows_args)
getScannerRows_args.thrift_spec = (
    None,  # 0
    (1, TType.I32, 'scannerId', None, None, ),  # 1
    (2, TType.I32, 'numRows', None, 1, ),  # 2
)
class getScannerRows_result(object):
    """Generated Thrift result struct for the ``getScannerRows`` RPC.

    Attributes:
     - success: list of TResult rows returned on success (field id 0)
     - io: TIOError raised by the server, if any (field id 1)
     - ia: TIllegalArgument if the scannerId is invalid (field id 2)
    """

    def __init__(self, success=None, io=None, ia=None,):
        self.success = success
        self.io = io
        self.ia = ia

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        # Fast path: C-accelerated decode when the protocol and transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Generic decode loop: fields matched by id; unknown fields skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    # Element count comes from the list header; element type
                    # (_etype221) is unused here because the spec fixes it.
                    self.success = []
                    (_etype221, _size218) = iprot.readListBegin()
                    for _i222 in range(_size218):
                        _elem223 = TResult()
                        _elem223.read(iprot)
                        self.success.append(_elem223)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.ia = TIllegalArgument()
                    self.ia.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None-valued fields are omitted."""
        # Fast path: C-accelerated encode when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getScannerRows_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for iter224 in self.success:
                iter224.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        if self.ia is not None:
            oprot.writeFieldBegin('ia', TType.STRUCT, 2)
            self.ia.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields on a result struct.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Collect the struct and declare its wire spec:
# (field id, wire type, name, type args, default value).
all_structs.append(getScannerRows_result)
getScannerRows_result.thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT, [TResult, None], False), None, ),  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
    (2, TType.STRUCT, 'ia', [TIllegalArgument, None], None, ),  # 2
)
class closeScanner_args(object):
    """Generated Thrift argument struct for the ``closeScanner`` RPC.

    Attributes:
     - scannerId: the Id of the Scanner to close (i32, required)
    """

    def __init__(self, scannerId=None,):
        self.scannerId = scannerId

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        # Fast path: C-accelerated decode when the protocol and transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Generic decode loop: fields matched by id; unknown fields skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.scannerId = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None-valued fields are omitted."""
        # Fast path: C-accelerated encode when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('closeScanner_args')
        if self.scannerId is not None:
            oprot.writeFieldBegin('scannerId', TType.I32, 1)
            oprot.writeI32(self.scannerId)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # scannerId is marked required in the IDL.
        if self.scannerId is None:
            raise TProtocolException(message='Required field scannerId is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Collect the struct and declare its wire spec:
# (field id, wire type, name, type args, default value).
all_structs.append(closeScanner_args)
closeScanner_args.thrift_spec = (
    None,  # 0
    (1, TType.I32, 'scannerId', None, None, ),  # 1
)
class closeScanner_result(object):
    """Generated Thrift result struct for the ``closeScanner`` RPC (void return).

    Attributes:
     - io: TIOError raised by the server, if any (field id 1)
     - ia: TIllegalArgument if the scannerId is invalid (field id 2)
    """

    def __init__(self, io=None, ia=None,):
        self.io = io
        self.ia = ia

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        # Fast path: C-accelerated decode when the protocol and transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Generic decode loop: fields matched by id; unknown fields skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.ia = TIllegalArgument()
                    self.ia.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None-valued fields are omitted."""
        # Fast path: C-accelerated encode when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('closeScanner_result')
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        if self.ia is not None:
            oprot.writeFieldBegin('ia', TType.STRUCT, 2)
            self.ia.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields on a result struct.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Collect the struct and declare its wire spec:
# (field id, wire type, name, type args, default value).
all_structs.append(closeScanner_result)
closeScanner_result.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
    (2, TType.STRUCT, 'ia', [TIllegalArgument, None], None, ),  # 2
)
class mutateRow_args(object):
    """Generated Thrift argument struct for the ``mutateRow`` RPC.

    Attributes:
     - table: table to apply the mutations (binary, required)
     - trowMutations: mutations to apply (required)
    """

    def __init__(self, table=None, trowMutations=None,):
        self.table = table
        self.trowMutations = trowMutations

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        # Fast path: C-accelerated decode when the protocol and transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Generic decode loop: fields matched by id; unknown fields skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.table = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.trowMutations = TRowMutations()
                    self.trowMutations.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None-valued fields are omitted."""
        # Fast path: C-accelerated encode when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('mutateRow_args')
        if self.table is not None:
            oprot.writeFieldBegin('table', TType.STRING, 1)
            oprot.writeBinary(self.table)
            oprot.writeFieldEnd()
        if self.trowMutations is not None:
            oprot.writeFieldBegin('trowMutations', TType.STRUCT, 2)
            self.trowMutations.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Both fields are marked required in the IDL.
        if self.table is None:
            raise TProtocolException(message='Required field table is unset!')
        if self.trowMutations is None:
            raise TProtocolException(message='Required field trowMutations is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Collect the struct and declare its wire spec:
# (field id, wire type, name, type args, default value).
all_structs.append(mutateRow_args)
mutateRow_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'table', 'BINARY', None, ),  # 1
    (2, TType.STRUCT, 'trowMutations', [TRowMutations, None], None, ),  # 2
)
class mutateRow_result(object):
    """Generated Thrift result struct for the ``mutateRow`` RPC (void return).

    Attributes:
     - io: TIOError raised by the server, if any (field id 1)
    """

    def __init__(self, io=None,):
        self.io = io

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        # Fast path: C-accelerated decode when the protocol and transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Generic decode loop: fields matched by id; unknown fields skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None-valued fields are omitted."""
        # Fast path: C-accelerated encode when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('mutateRow_result')
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields on a result struct.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Collect the struct and declare its wire spec:
# (field id, wire type, name, type args, default value).
all_structs.append(mutateRow_result)
mutateRow_result.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class getScannerResults_args(object):
    """Generated Thrift argument struct for the ``getScannerResults`` RPC.

    Attributes:
     - table: the table to get the Scanner for (binary, required)
     - tscan: the scan object to get a Scanner for (required)
     - numRows: number of rows to return (i32, defaults to 1)
    """

    def __init__(self, table=None, tscan=None, numRows=1,):
        self.table = table
        self.tscan = tscan
        self.numRows = numRows

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        # Fast path: C-accelerated decode when the protocol and transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Generic decode loop: fields matched by id; unknown fields skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.table = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.tscan = TScan()
                    self.tscan.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.I32:
                    self.numRows = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None-valued fields are omitted."""
        # Fast path: C-accelerated encode when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getScannerResults_args')
        if self.table is not None:
            oprot.writeFieldBegin('table', TType.STRING, 1)
            oprot.writeBinary(self.table)
            oprot.writeFieldEnd()
        if self.tscan is not None:
            oprot.writeFieldBegin('tscan', TType.STRUCT, 2)
            self.tscan.write(oprot)
            oprot.writeFieldEnd()
        if self.numRows is not None:
            oprot.writeFieldBegin('numRows', TType.I32, 3)
            oprot.writeI32(self.numRows)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # table and tscan are required; numRows has a default and may be unset.
        if self.table is None:
            raise TProtocolException(message='Required field table is unset!')
        if self.tscan is None:
            raise TProtocolException(message='Required field tscan is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Collect the struct and declare its wire spec:
# (field id, wire type, name, type args, default value).
all_structs.append(getScannerResults_args)
getScannerResults_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'table', 'BINARY', None, ),  # 1
    (2, TType.STRUCT, 'tscan', [TScan, None], None, ),  # 2
    (3, TType.I32, 'numRows', None, 1, ),  # 3
)
class getScannerResults_result(object):
    """Generated Thrift result struct for the ``getScannerResults`` RPC.

    Attributes:
     - success: list of TResult rows returned on success (field id 0)
     - io: TIOError raised by the server, if any (field id 1)
    """

    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        # Fast path: C-accelerated decode when the protocol and transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Generic decode loop: fields matched by id; unknown fields skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    # Element count comes from the list header; the declared
                    # element type (_etype228) is unused on this path.
                    self.success = []
                    (_etype228, _size225) = iprot.readListBegin()
                    for _i229 in range(_size225):
                        _elem230 = TResult()
                        _elem230.read(iprot)
                        self.success.append(_elem230)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None-valued fields are omitted."""
        # Fast path: C-accelerated encode when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getScannerResults_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for iter231 in self.success:
                iter231.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields on a result struct.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Collect the struct and declare its wire spec:
# (field id, wire type, name, type args, default value).
all_structs.append(getScannerResults_result)
getScannerResults_result.thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT, [TResult, None], False), None, ),  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class getRegionLocation_args(object):
    """Generated Thrift argument struct for the ``getRegionLocation`` RPC.

    Attributes:
     - table: table name (binary, required)
     - row: row key to locate (binary, required)
     - reload: bool flag forwarded to the server; presumably forces a cache
       refresh of the region location — confirm against the service IDL
    """

    def __init__(self, table=None, row=None, reload=None,):
        self.table = table
        self.row = row
        self.reload = reload

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        # Fast path: C-accelerated decode when the protocol and transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Generic decode loop: fields matched by id; unknown fields skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.table = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.row = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.BOOL:
                    self.reload = iprot.readBool()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None-valued fields are omitted."""
        # Fast path: C-accelerated encode when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getRegionLocation_args')
        if self.table is not None:
            oprot.writeFieldBegin('table', TType.STRING, 1)
            oprot.writeBinary(self.table)
            oprot.writeFieldEnd()
        if self.row is not None:
            oprot.writeFieldBegin('row', TType.STRING, 2)
            oprot.writeBinary(self.row)
            oprot.writeFieldEnd()
        if self.reload is not None:
            oprot.writeFieldBegin('reload', TType.BOOL, 3)
            oprot.writeBool(self.reload)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # table and row are required; reload is optional.
        if self.table is None:
            raise TProtocolException(message='Required field table is unset!')
        if self.row is None:
            raise TProtocolException(message='Required field row is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Collect the struct and declare its wire spec:
# (field id, wire type, name, type args, default value).
all_structs.append(getRegionLocation_args)
getRegionLocation_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'table', 'BINARY', None, ),  # 1
    (2, TType.STRING, 'row', 'BINARY', None, ),  # 2
    (3, TType.BOOL, 'reload', None, None, ),  # 3
)
class getRegionLocation_result(object):
    """Generated Thrift result struct for the ``getRegionLocation`` RPC.

    Attributes:
     - success: THRegionLocation returned on success (field id 0)
     - io: TIOError raised by the server, if any (field id 1)
    """

    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        # Fast path: C-accelerated decode when the protocol and transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Generic decode loop: fields matched by id; unknown fields skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = THRegionLocation()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None-valued fields are omitted."""
        # Fast path: C-accelerated encode when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getRegionLocation_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields on a result struct.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Collect the struct and declare its wire spec:
# (field id, wire type, name, type args, default value).
all_structs.append(getRegionLocation_result)
getRegionLocation_result.thrift_spec = (
    (0, TType.STRUCT, 'success', [THRegionLocation, None], None, ),  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class getAllRegionLocations_args(object):
    """Generated Thrift argument struct for the ``getAllRegionLocations`` RPC.

    Attributes:
     - table: table name (binary, required)
    """

    def __init__(self, table=None,):
        self.table = table

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        # Fast path: C-accelerated decode when the protocol and transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Generic decode loop: fields matched by id; unknown fields skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.table = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None-valued fields are omitted."""
        # Fast path: C-accelerated encode when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getAllRegionLocations_args')
        if self.table is not None:
            oprot.writeFieldBegin('table', TType.STRING, 1)
            oprot.writeBinary(self.table)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # table is marked required in the IDL.
        if self.table is None:
            raise TProtocolException(message='Required field table is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Collect the struct and declare its wire spec:
# (field id, wire type, name, type args, default value).
all_structs.append(getAllRegionLocations_args)
getAllRegionLocations_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'table', 'BINARY', None, ),  # 1
)
class getAllRegionLocations_result(object):
    """Generated Thrift result struct for the ``getAllRegionLocations`` RPC.

    Attributes:
     - success: list of THRegionLocation returned on success (field id 0)
     - io: TIOError raised by the server, if any (field id 1)
    """

    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        # Fast path: C-accelerated decode when the protocol and transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Generic decode loop: fields matched by id; unknown fields skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    # Element count comes from the list header; the declared
                    # element type (_etype235) is unused on this path.
                    self.success = []
                    (_etype235, _size232) = iprot.readListBegin()
                    for _i236 in range(_size232):
                        _elem237 = THRegionLocation()
                        _elem237.read(iprot)
                        self.success.append(_elem237)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None-valued fields are omitted."""
        # Fast path: C-accelerated encode when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getAllRegionLocations_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for iter238 in self.success:
                iter238.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields on a result struct.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Collect the struct and declare its wire spec:
# (field id, wire type, name, type args, default value).
all_structs.append(getAllRegionLocations_result)
getAllRegionLocations_result.thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT, [THRegionLocation, None], False), None, ),  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
# NOTE(review): Thrift-generated code — regenerate from the IDL rather than
# editing the serialization logic by hand.
class checkAndMutate_args(object):
    """
    Attributes:
     - table: to check in and delete from
     - row: row to check
     - family: column family to check
     - qualifier: column qualifier to check
     - compareOp: comparison to make on the value
     - value: the expected value to be compared against, if not provided the
    check is for the non-existence of the column in question
     - rowMutations: row mutations to execute if the value matches
    """
    def __init__(self, table=None, row=None, family=None, qualifier=None, compareOp=None, value=None, rowMutations=None,):
        self.table = table
        self.row = row
        self.family = family
        self.qualifier = qualifier
        self.compareOp = compareOp
        self.value = value
        self.rowMutations = rowMutations
    def read(self, iprot):
        # Deserialize from *iprot*; uses the C fast-path when available,
        # otherwise walks fields by id and skips unknown ids/types.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.table = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.row = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.family = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.STRING:
                    self.qualifier = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.I32:
                    self.compareOp = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.STRING:
                    self.value = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 7:
                if ftype == TType.STRUCT:
                    self.rowMutations = TRowMutations()
                    self.rowMutations.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; uses the C fast-path when available.
        # None-valued fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('checkAndMutate_args')
        if self.table is not None:
            oprot.writeFieldBegin('table', TType.STRING, 1)
            oprot.writeBinary(self.table)
            oprot.writeFieldEnd()
        if self.row is not None:
            oprot.writeFieldBegin('row', TType.STRING, 2)
            oprot.writeBinary(self.row)
            oprot.writeFieldEnd()
        if self.family is not None:
            oprot.writeFieldBegin('family', TType.STRING, 3)
            oprot.writeBinary(self.family)
            oprot.writeFieldEnd()
        if self.qualifier is not None:
            oprot.writeFieldBegin('qualifier', TType.STRING, 4)
            oprot.writeBinary(self.qualifier)
            oprot.writeFieldEnd()
        if self.compareOp is not None:
            oprot.writeFieldBegin('compareOp', TType.I32, 5)
            oprot.writeI32(self.compareOp)
            oprot.writeFieldEnd()
        if self.value is not None:
            oprot.writeFieldBegin('value', TType.STRING, 6)
            oprot.writeBinary(self.value)
            oprot.writeFieldEnd()
        if self.rowMutations is not None:
            oprot.writeFieldBegin('rowMutations', TType.STRUCT, 7)
            self.rowMutations.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # All fields except `value` are required by the IDL; fail fast if unset.
        if self.table is None:
            raise TProtocolException(message='Required field table is unset!')
        if self.row is None:
            raise TProtocolException(message='Required field row is unset!')
        if self.family is None:
            raise TProtocolException(message='Required field family is unset!')
        if self.qualifier is None:
            raise TProtocolException(message='Required field qualifier is unset!')
        if self.compareOp is None:
            raise TProtocolException(message='Required field compareOp is unset!')
        if self.rowMutations is None:
            raise TProtocolException(message='Required field rowMutations is unset!')
        return
    def __repr__(self):
        # Debug representation listing every attribute as name=value.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Wire spec: (field id, wire type, name, type arguments, default value).
all_structs.append(checkAndMutate_args)
checkAndMutate_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'table', 'BINARY', None, ), # 1
    (2, TType.STRING, 'row', 'BINARY', None, ), # 2
    (3, TType.STRING, 'family', 'BINARY', None, ), # 3
    (4, TType.STRING, 'qualifier', 'BINARY', None, ), # 4
    (5, TType.I32, 'compareOp', None, None, ), # 5
    (6, TType.STRING, 'value', 'BINARY', None, ), # 6
    (7, TType.STRUCT, 'rowMutations', [TRowMutations, None], None, ), # 7
)
# NOTE(review): Thrift-generated code — regenerate from the IDL rather than
# editing by hand.
class checkAndMutate_result(object):
    """
    Attributes:
     - success: boolean outcome of the checkAndMutate call
     - io: I/O error raised by the server, if any
    """
    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io
    def read(self, iprot):
        # Deserialize from *iprot*; C fast-path when available, otherwise
        # walk fields by id and skip anything unknown.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.BOOL:
                    self.success = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('checkAndMutate_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.BOOL, 0)
            oprot.writeBool(self.success)
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Result structs have no required fields.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Wire spec: (field id, wire type, name, type arguments, default value).
all_structs.append(checkAndMutate_result)
checkAndMutate_result.thrift_spec = (
    (0, TType.BOOL, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
# NOTE(review): Thrift-generated code — regenerate from the IDL rather than
# editing by hand.
class getTableDescriptor_args(object):
    """
    Attributes:
     - table: the tablename of the table to get tableDescriptor
    """
    def __init__(self, table=None,):
        self.table = table
    def read(self, iprot):
        # Deserialize from *iprot*; C fast-path when available, otherwise
        # walk fields by id and skip anything unknown.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.table = TTableName()
                    self.table.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getTableDescriptor_args')
        if self.table is not None:
            oprot.writeFieldBegin('table', TType.STRUCT, 1)
            self.table.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # `table` is required by the IDL.
        if self.table is None:
            raise TProtocolException(message='Required field table is unset!')
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Wire spec: (field id, wire type, name, type arguments, default value).
all_structs.append(getTableDescriptor_args)
getTableDescriptor_args.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'table', [TTableName, None], None, ), # 1
)
# NOTE(review): Thrift-generated code — regenerate from the IDL rather than
# editing by hand.
class getTableDescriptor_result(object):
    """
    Attributes:
     - success: the TTableDescriptor returned by the server
     - io: I/O error raised by the server, if any
    """
    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io
    def read(self, iprot):
        # Deserialize from *iprot*; C fast-path when available, otherwise
        # walk fields by id and skip anything unknown.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = TTableDescriptor()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getTableDescriptor_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Result structs have no required fields.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Wire spec: (field id, wire type, name, type arguments, default value).
all_structs.append(getTableDescriptor_result)
getTableDescriptor_result.thrift_spec = (
    (0, TType.STRUCT, 'success', [TTableDescriptor, None], None, ), # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
# NOTE(review): Thrift-generated code — regenerate from the IDL rather than
# editing by hand.
class getTableDescriptors_args(object):
    """
    Attributes:
     - tables: the tablename list of the tables to get tableDescriptor
    """
    def __init__(self, tables=None,):
        self.tables = tables
    def read(self, iprot):
        # Deserialize from *iprot*; C fast-path when available. The list
        # field is rebuilt element by element as TTableName structs.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.LIST:
                    self.tables = []
                    (_etype242, _size239) = iprot.readListBegin()
                    for _i243 in range(_size239):
                        _elem244 = TTableName()
                        _elem244.read(iprot)
                        self.tables.append(_elem244)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getTableDescriptors_args')
        if self.tables is not None:
            oprot.writeFieldBegin('tables', TType.LIST, 1)
            oprot.writeListBegin(TType.STRUCT, len(self.tables))
            for iter245 in self.tables:
                iter245.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # `tables` is required by the IDL.
        if self.tables is None:
            raise TProtocolException(message='Required field tables is unset!')
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Wire spec: (field id, wire type, name, type arguments, default value).
all_structs.append(getTableDescriptors_args)
getTableDescriptors_args.thrift_spec = (
    None,  # 0
    (1, TType.LIST, 'tables', (TType.STRUCT, [TTableName, None], False), None, ), # 1
)
# NOTE(review): Thrift-generated code — regenerate from the IDL rather than
# editing by hand.
class getTableDescriptors_result(object):
    """
    Attributes:
     - success: list of TTableDescriptor returned by the server
     - io: I/O error raised by the server, if any
    """
    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io
    def read(self, iprot):
        # Deserialize from *iprot*; C fast-path when available. The success
        # list is rebuilt element by element as TTableDescriptor structs.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype249, _size246) = iprot.readListBegin()
                    for _i250 in range(_size246):
                        _elem251 = TTableDescriptor()
                        _elem251.read(iprot)
                        self.success.append(_elem251)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getTableDescriptors_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for iter252 in self.success:
                iter252.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Result structs have no required fields.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Wire spec: (field id, wire type, name, type arguments, default value).
all_structs.append(getTableDescriptors_result)
getTableDescriptors_result.thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT, [TTableDescriptor, None], False), None, ), # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
# NOTE(review): Thrift-generated code — regenerate from the IDL rather than
# editing by hand.
class tableExists_args(object):
    """
    Attributes:
     - tableName: the tablename of the tables to check
    """
    def __init__(self, tableName=None,):
        self.tableName = tableName
    def read(self, iprot):
        # Deserialize from *iprot*; C fast-path when available, otherwise
        # walk fields by id and skip anything unknown.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.tableName = TTableName()
                    self.tableName.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('tableExists_args')
        if self.tableName is not None:
            oprot.writeFieldBegin('tableName', TType.STRUCT, 1)
            self.tableName.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # `tableName` is optional per the IDL, so nothing to enforce here.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Wire spec: (field id, wire type, name, type arguments, default value).
all_structs.append(tableExists_args)
tableExists_args.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'tableName', [TTableName, None], None, ), # 1
)
# NOTE(review): Thrift-generated code — regenerate from the IDL rather than
# editing by hand.
class tableExists_result(object):
    """
    Attributes:
     - success: boolean outcome of the tableExists call
     - io: I/O error raised by the server, if any
    """
    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io
    def read(self, iprot):
        # Deserialize from *iprot*; C fast-path when available, otherwise
        # walk fields by id and skip anything unknown.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.BOOL:
                    self.success = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('tableExists_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.BOOL, 0)
            oprot.writeBool(self.success)
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Result structs have no required fields.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Wire spec: (field id, wire type, name, type arguments, default value).
all_structs.append(tableExists_result)
tableExists_result.thrift_spec = (
    (0, TType.BOOL, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
# NOTE(review): Thrift-generated code — regenerate from the IDL rather than
# editing by hand.
class getTableDescriptorsByPattern_args(object):
    """
    Attributes:
     - regex: The regular expression to match against
     - includeSysTables: set to false if match only against userspace tables
    """
    def __init__(self, regex=None, includeSysTables=None,):
        self.regex = regex
        self.includeSysTables = includeSysTables
    def read(self, iprot):
        # Deserialize from *iprot*; C fast-path when available. Strings are
        # decoded to unicode on Python 2, read as-is on Python 3.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.regex = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.BOOL:
                    self.includeSysTables = iprot.readBool()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getTableDescriptorsByPattern_args')
        if self.regex is not None:
            oprot.writeFieldBegin('regex', TType.STRING, 1)
            oprot.writeString(self.regex.encode('utf-8') if sys.version_info[0] == 2 else self.regex)
            oprot.writeFieldEnd()
        if self.includeSysTables is not None:
            oprot.writeFieldBegin('includeSysTables', TType.BOOL, 2)
            oprot.writeBool(self.includeSysTables)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Only `includeSysTables` is required; `regex` may be omitted.
        if self.includeSysTables is None:
            raise TProtocolException(message='Required field includeSysTables is unset!')
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Wire spec: (field id, wire type, name, type arguments, default value).
all_structs.append(getTableDescriptorsByPattern_args)
getTableDescriptorsByPattern_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'regex', 'UTF8', None, ), # 1
    (2, TType.BOOL, 'includeSysTables', None, None, ), # 2
)
# NOTE(review): Thrift-generated code — regenerate from the IDL rather than
# editing by hand.
class getTableDescriptorsByPattern_result(object):
    """
    Attributes:
     - success: list of TTableDescriptor matching the pattern
     - io: I/O error raised by the server, if any
    """
    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io
    def read(self, iprot):
        # Deserialize from *iprot*; C fast-path when available. The success
        # list is rebuilt element by element as TTableDescriptor structs.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype256, _size253) = iprot.readListBegin()
                    for _i257 in range(_size253):
                        _elem258 = TTableDescriptor()
                        _elem258.read(iprot)
                        self.success.append(_elem258)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getTableDescriptorsByPattern_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for iter259 in self.success:
                iter259.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Result structs have no required fields.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Wire spec: (field id, wire type, name, type arguments, default value).
all_structs.append(getTableDescriptorsByPattern_result)
getTableDescriptorsByPattern_result.thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT, [TTableDescriptor, None], False), None, ), # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
# NOTE(review): Thrift-generated code — regenerate from the IDL rather than
# editing by hand.
class getTableDescriptorsByNamespace_args(object):
    """
    Attributes:
     - name: The namespace's name
    """
    def __init__(self, name=None,):
        self.name = name
    def read(self, iprot):
        # Deserialize from *iprot*; C fast-path when available. Strings are
        # decoded to unicode on Python 2, read as-is on Python 3.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getTableDescriptorsByNamespace_args')
        if self.name is not None:
            oprot.writeFieldBegin('name', TType.STRING, 1)
            oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # `name` is required by the IDL.
        if self.name is None:
            raise TProtocolException(message='Required field name is unset!')
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Wire spec: (field id, wire type, name, type arguments, default value).
all_structs.append(getTableDescriptorsByNamespace_args)
getTableDescriptorsByNamespace_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'name', 'UTF8', None, ), # 1
)
# NOTE(review): Thrift-generated code — regenerate from the IDL rather than
# editing by hand.
class getTableDescriptorsByNamespace_result(object):
    """
    Attributes:
     - success: list of TTableDescriptor in the namespace
     - io: I/O error raised by the server, if any
    """
    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io
    def read(self, iprot):
        # Deserialize from *iprot*; C fast-path when available. The success
        # list is rebuilt element by element as TTableDescriptor structs.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype263, _size260) = iprot.readListBegin()
                    for _i264 in range(_size260):
                        _elem265 = TTableDescriptor()
                        _elem265.read(iprot)
                        self.success.append(_elem265)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getTableDescriptorsByNamespace_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for iter266 in self.success:
                iter266.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Result structs have no required fields.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Wire spec: (field id, wire type, name, type arguments, default value).
all_structs.append(getTableDescriptorsByNamespace_result)
getTableDescriptorsByNamespace_result.thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT, [TTableDescriptor, None], False), None, ), # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
# NOTE(review): Thrift-generated code — regenerate from the IDL rather than
# editing by hand.
class getTableNamesByPattern_args(object):
    """
    Attributes:
     - regex: The regular expression to match against
     - includeSysTables: set to false if match only against userspace tables
    """
    def __init__(self, regex=None, includeSysTables=None,):
        self.regex = regex
        self.includeSysTables = includeSysTables
    def read(self, iprot):
        # Deserialize from *iprot*; C fast-path when available. Strings are
        # decoded to unicode on Python 2, read as-is on Python 3.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.regex = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.BOOL:
                    self.includeSysTables = iprot.readBool()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getTableNamesByPattern_args')
        if self.regex is not None:
            oprot.writeFieldBegin('regex', TType.STRING, 1)
            oprot.writeString(self.regex.encode('utf-8') if sys.version_info[0] == 2 else self.regex)
            oprot.writeFieldEnd()
        if self.includeSysTables is not None:
            oprot.writeFieldBegin('includeSysTables', TType.BOOL, 2)
            oprot.writeBool(self.includeSysTables)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Only `includeSysTables` is required; `regex` may be omitted.
        if self.includeSysTables is None:
            raise TProtocolException(message='Required field includeSysTables is unset!')
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Wire spec: (field id, wire type, name, type arguments, default value).
all_structs.append(getTableNamesByPattern_args)
getTableNamesByPattern_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'regex', 'UTF8', None, ), # 1
    (2, TType.BOOL, 'includeSysTables', None, None, ), # 2
)
# NOTE(review): Thrift-generated code — regenerate from the IDL rather than
# editing by hand.
class getTableNamesByPattern_result(object):
    """
    Attributes:
     - success: list of TTableName matching the pattern
     - io: I/O error raised by the server, if any
    """
    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io
    def read(self, iprot):
        # Deserialize from *iprot*; C fast-path when available. The success
        # list is rebuilt element by element as TTableName structs.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype270, _size267) = iprot.readListBegin()
                    for _i271 in range(_size267):
                        _elem272 = TTableName()
                        _elem272.read(iprot)
                        self.success.append(_elem272)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getTableNamesByPattern_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for iter273 in self.success:
                iter273.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Result structs have no required fields.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Wire spec: (field id, wire type, name, type arguments, default value).
all_structs.append(getTableNamesByPattern_result)
getTableNamesByPattern_result.thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT, [TTableName, None], False), None, ), # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
# NOTE(review): Thrift-generated code — regenerate from the IDL rather than
# editing by hand.
class getTableNamesByNamespace_args(object):
    """
    Attributes:
     - name: The namespace's name
    """
    def __init__(self, name=None,):
        self.name = name
    def read(self, iprot):
        # Deserialize from *iprot*; C fast-path when available. Strings are
        # decoded to unicode on Python 2, read as-is on Python 3.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getTableNamesByNamespace_args')
        if self.name is not None:
            oprot.writeFieldBegin('name', TType.STRING, 1)
            oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # `name` is required by the IDL.
        if self.name is None:
            raise TProtocolException(message='Required field name is unset!')
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Wire spec: (field id, wire type, name, type arguments, default value).
all_structs.append(getTableNamesByNamespace_args)
getTableNamesByNamespace_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'name', 'UTF8', None, ), # 1
)
# NOTE(review): Thrift-generated code — regenerate from the IDL rather than
# editing by hand.
class getTableNamesByNamespace_result(object):
    """
    Attributes:
     - success: list of TTableName in the namespace
     - io: I/O error raised by the server, if any
    """
    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io
    def read(self, iprot):
        # Deserialize from *iprot*; C fast-path when available. The success
        # list is rebuilt element by element as TTableName structs.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype277, _size274) = iprot.readListBegin()
                    for _i278 in range(_size274):
                        _elem279 = TTableName()
                        _elem279.read(iprot)
                        self.success.append(_elem279)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getTableNamesByNamespace_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for iter280 in self.success:
                iter280.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Result structs have no required fields.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Wire spec: (field id, wire type, name, type arguments, default value).
all_structs.append(getTableNamesByNamespace_result)
getTableNamesByNamespace_result.thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT, [TTableName, None], False), None, ), # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class createTable_args(object):
    """
    Thrift-generated argument struct for the createTable RPC.

    Attributes:
     - desc: table descriptor for table
     - splitKeys: array of split keys for the initial regions of the table
    """

    def __init__(self, desc=None, splitKeys=None,):
        self.desc = desc
        self.splitKeys = splitKeys

    def read(self, iprot):
        """Deserialize this struct from *iprot* (a Thrift input protocol)."""
        # Fast path: use the C-accelerated decoder when the protocol provides one.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.desc = TTableDescriptor()
                    self.desc.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    self.splitKeys = []
                    (_etype284, _size281) = iprot.readListBegin()
                    for _i285 in range(_size281):
                        _elem286 = iprot.readBinary()
                        self.splitKeys.append(_elem286)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)  # unknown field id: skip for forward compatibility
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('createTable_args')
        if self.desc is not None:
            oprot.writeFieldBegin('desc', TType.STRUCT, 1)
            self.desc.write(oprot)
            oprot.writeFieldEnd()
        if self.splitKeys is not None:
            oprot.writeFieldBegin('splitKeys', TType.LIST, 2)
            oprot.writeListBegin(TType.STRING, len(self.splitKeys))
            for iter287 in self.splitKeys:
                oprot.writeBinary(iter287)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # splitKeys is optional; only desc is required.
        if self.desc is None:
            raise TProtocolException(message='Required field desc is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# thrift_spec: tuple index == Thrift field id.
all_structs.append(createTable_args)
createTable_args.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'desc', [TTableDescriptor, None], None, ),  # 1
    (2, TType.LIST, 'splitKeys', (TType.STRING, 'BINARY', False), None, ),  # 2
)
class createTable_result(object):
    """
    Thrift-generated result struct for the createTable RPC (void return;
    only a possible TIOError is carried back).

    Attributes:
     - io
    """

    def __init__(self, io=None,):
        self.io = io

    def read(self, iprot):
        """Deserialize this struct from *iprot* (a Thrift input protocol)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)  # unknown field id: skip
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('createTable_result')
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# thrift_spec: tuple index == Thrift field id.
all_structs.append(createTable_result)
createTable_result.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class deleteTable_args(object):
    """
    Thrift-generated argument struct for the deleteTable RPC.

    Attributes:
     - tableName: the tablename to delete
    """

    def __init__(self, tableName=None,):
        self.tableName = tableName

    def read(self, iprot):
        """Deserialize this struct from *iprot* (a Thrift input protocol)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.tableName = TTableName()
                    self.tableName.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)  # unknown field id: skip
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('deleteTable_args')
        if self.tableName is not None:
            oprot.writeFieldBegin('tableName', TType.STRUCT, 1)
            self.tableName.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        if self.tableName is None:
            raise TProtocolException(message='Required field tableName is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# thrift_spec: tuple index == Thrift field id.
all_structs.append(deleteTable_args)
deleteTable_args.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'tableName', [TTableName, None], None, ),  # 1
)
class deleteTable_result(object):
    """
    Thrift-generated result struct for the deleteTable RPC (void return;
    only a possible TIOError is carried back).

    Attributes:
     - io
    """

    def __init__(self, io=None,):
        self.io = io

    def read(self, iprot):
        """Deserialize this struct from *iprot* (a Thrift input protocol)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)  # unknown field id: skip
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('deleteTable_result')
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# thrift_spec: tuple index == Thrift field id.
all_structs.append(deleteTable_result)
deleteTable_result.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class truncateTable_args(object):
    """
    Thrift-generated argument struct for the truncateTable RPC.

    Attributes:
     - tableName: the tablename to truncate
     - preserveSplits: whether to preserve previous splits
    """

    def __init__(self, tableName=None, preserveSplits=None,):
        self.tableName = tableName
        self.preserveSplits = preserveSplits

    def read(self, iprot):
        """Deserialize this struct from *iprot* (a Thrift input protocol)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.tableName = TTableName()
                    self.tableName.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.BOOL:
                    self.preserveSplits = iprot.readBool()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)  # unknown field id: skip
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('truncateTable_args')
        if self.tableName is not None:
            oprot.writeFieldBegin('tableName', TType.STRUCT, 1)
            self.tableName.write(oprot)
            oprot.writeFieldEnd()
        if self.preserveSplits is not None:
            oprot.writeFieldBegin('preserveSplits', TType.BOOL, 2)
            oprot.writeBool(self.preserveSplits)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Both fields are required for this call.
        if self.tableName is None:
            raise TProtocolException(message='Required field tableName is unset!')
        if self.preserveSplits is None:
            raise TProtocolException(message='Required field preserveSplits is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# thrift_spec: tuple index == Thrift field id.
all_structs.append(truncateTable_args)
truncateTable_args.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'tableName', [TTableName, None], None, ),  # 1
    (2, TType.BOOL, 'preserveSplits', None, None, ),  # 2
)
class truncateTable_result(object):
    """
    Thrift-generated result struct for the truncateTable RPC (void return;
    only a possible TIOError is carried back).

    Attributes:
     - io
    """

    def __init__(self, io=None,):
        self.io = io

    def read(self, iprot):
        """Deserialize this struct from *iprot* (a Thrift input protocol)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)  # unknown field id: skip
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('truncateTable_result')
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# thrift_spec: tuple index == Thrift field id.
all_structs.append(truncateTable_result)
truncateTable_result.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class enableTable_args(object):
    """
    Thrift-generated argument struct for the enableTable RPC.

    Attributes:
     - tableName: the tablename to enable
    """

    def __init__(self, tableName=None,):
        self.tableName = tableName

    def read(self, iprot):
        """Deserialize this struct from *iprot* (a Thrift input protocol)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.tableName = TTableName()
                    self.tableName.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)  # unknown field id: skip
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('enableTable_args')
        if self.tableName is not None:
            oprot.writeFieldBegin('tableName', TType.STRUCT, 1)
            self.tableName.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        if self.tableName is None:
            raise TProtocolException(message='Required field tableName is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# thrift_spec: tuple index == Thrift field id.
all_structs.append(enableTable_args)
enableTable_args.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'tableName', [TTableName, None], None, ),  # 1
)
class enableTable_result(object):
    """
    Thrift-generated result struct for the enableTable RPC (void return;
    only a possible TIOError is carried back).

    Attributes:
     - io
    """

    def __init__(self, io=None,):
        self.io = io

    def read(self, iprot):
        """Deserialize this struct from *iprot* (a Thrift input protocol)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)  # unknown field id: skip
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('enableTable_result')
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# thrift_spec: tuple index == Thrift field id.
all_structs.append(enableTable_result)
enableTable_result.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class disableTable_args(object):
    """
    Thrift-generated argument struct for the disableTable RPC.

    Attributes:
     - tableName: the tablename to disable
    """

    def __init__(self, tableName=None,):
        self.tableName = tableName

    def read(self, iprot):
        """Deserialize this struct from *iprot* (a Thrift input protocol)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.tableName = TTableName()
                    self.tableName.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)  # unknown field id: skip
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('disableTable_args')
        if self.tableName is not None:
            oprot.writeFieldBegin('tableName', TType.STRUCT, 1)
            self.tableName.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        if self.tableName is None:
            raise TProtocolException(message='Required field tableName is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# thrift_spec: tuple index == Thrift field id.
all_structs.append(disableTable_args)
disableTable_args.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'tableName', [TTableName, None], None, ),  # 1
)
class disableTable_result(object):
    """
    Thrift-generated result struct for the disableTable RPC (void return;
    only a possible TIOError is carried back).

    Attributes:
     - io
    """

    def __init__(self, io=None,):
        self.io = io

    def read(self, iprot):
        """Deserialize this struct from *iprot* (a Thrift input protocol)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)  # unknown field id: skip
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('disableTable_result')
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# thrift_spec: tuple index == Thrift field id.
all_structs.append(disableTable_result)
disableTable_result.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class isTableEnabled_args(object):
    """
    Thrift-generated argument struct for the isTableEnabled RPC.

    Attributes:
     - tableName: the tablename to check
    """

    def __init__(self, tableName=None,):
        self.tableName = tableName

    def read(self, iprot):
        """Deserialize this struct from *iprot* (a Thrift input protocol)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.tableName = TTableName()
                    self.tableName.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)  # unknown field id: skip
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('isTableEnabled_args')
        if self.tableName is not None:
            oprot.writeFieldBegin('tableName', TType.STRUCT, 1)
            self.tableName.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        if self.tableName is None:
            raise TProtocolException(message='Required field tableName is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# thrift_spec: tuple index == Thrift field id.
all_structs.append(isTableEnabled_args)
isTableEnabled_args.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'tableName', [TTableName, None], None, ),  # 1
)
class isTableEnabled_result(object):
    """
    Thrift-generated result struct for the isTableEnabled RPC
    (boolean return carried in ``success``).

    Attributes:
     - success
     - io
    """

    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io

    def read(self, iprot):
        """Deserialize this struct from *iprot* (a Thrift input protocol)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.BOOL:
                    self.success = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)  # unknown field id: skip
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('isTableEnabled_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.BOOL, 0)
            oprot.writeBool(self.success)
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# thrift_spec: tuple index == Thrift field id.
all_structs.append(isTableEnabled_result)
isTableEnabled_result.thrift_spec = (
    (0, TType.BOOL, 'success', None, None, ),  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class isTableDisabled_args(object):
    """
    Thrift-generated argument struct for the isTableDisabled RPC.

    Attributes:
     - tableName: the tablename to check
    """

    def __init__(self, tableName=None,):
        self.tableName = tableName

    def read(self, iprot):
        """Deserialize this struct from *iprot* (a Thrift input protocol)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.tableName = TTableName()
                    self.tableName.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)  # unknown field id: skip
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('isTableDisabled_args')
        if self.tableName is not None:
            oprot.writeFieldBegin('tableName', TType.STRUCT, 1)
            self.tableName.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        if self.tableName is None:
            raise TProtocolException(message='Required field tableName is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# thrift_spec: tuple index == Thrift field id.
all_structs.append(isTableDisabled_args)
isTableDisabled_args.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'tableName', [TTableName, None], None, ),  # 1
)
class isTableDisabled_result(object):
    """
    Thrift-generated result struct for the isTableDisabled RPC
    (boolean return carried in ``success``).

    Attributes:
     - success
     - io
    """

    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io

    def read(self, iprot):
        """Deserialize this struct from *iprot* (a Thrift input protocol)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.BOOL:
                    self.success = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)  # unknown field id: skip
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('isTableDisabled_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.BOOL, 0)
            oprot.writeBool(self.success)
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# thrift_spec: tuple index == Thrift field id.
all_structs.append(isTableDisabled_result)
isTableDisabled_result.thrift_spec = (
    (0, TType.BOOL, 'success', None, None, ),  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class isTableAvailable_args(object):
    """
    Thrift-generated argument struct for the isTableAvailable RPC.

    Attributes:
     - tableName: the tablename to check
    """

    def __init__(self, tableName=None,):
        self.tableName = tableName

    def read(self, iprot):
        """Deserialize this struct from *iprot* (a Thrift input protocol)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.tableName = TTableName()
                    self.tableName.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)  # unknown field id: skip
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('isTableAvailable_args')
        if self.tableName is not None:
            oprot.writeFieldBegin('tableName', TType.STRUCT, 1)
            self.tableName.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        if self.tableName is None:
            raise TProtocolException(message='Required field tableName is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# thrift_spec: tuple index == Thrift field id.
all_structs.append(isTableAvailable_args)
isTableAvailable_args.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'tableName', [TTableName, None], None, ),  # 1
)
class isTableAvailable_result(object):
    """
    Thrift-generated result struct for the isTableAvailable RPC
    (boolean return carried in ``success``).

    Attributes:
     - success
     - io
    """

    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io

    def read(self, iprot):
        """Deserialize this struct from *iprot* (a Thrift input protocol)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.BOOL:
                    self.success = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)  # unknown field id: skip
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('isTableAvailable_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.BOOL, 0)
            oprot.writeBool(self.success)
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# thrift_spec: tuple index == Thrift field id.
all_structs.append(isTableAvailable_result)
isTableAvailable_result.thrift_spec = (
    (0, TType.BOOL, 'success', None, None, ),  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class isTableAvailableWithSplit_args(object):
    """
    Thrift-generated argument struct for the isTableAvailableWithSplit RPC.

    Attributes:
     - tableName: the tablename to check
     - splitKeys: keys to check if the table has been created with all split keys
    """

    def __init__(self, tableName=None, splitKeys=None,):
        self.tableName = tableName
        self.splitKeys = splitKeys

    def read(self, iprot):
        """Deserialize this struct from *iprot* (a Thrift input protocol)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.tableName = TTableName()
                    self.tableName.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    self.splitKeys = []
                    (_etype291, _size288) = iprot.readListBegin()
                    for _i292 in range(_size288):
                        _elem293 = iprot.readBinary()
                        self.splitKeys.append(_elem293)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)  # unknown field id: skip
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('isTableAvailableWithSplit_args')
        if self.tableName is not None:
            oprot.writeFieldBegin('tableName', TType.STRUCT, 1)
            self.tableName.write(oprot)
            oprot.writeFieldEnd()
        if self.splitKeys is not None:
            oprot.writeFieldBegin('splitKeys', TType.LIST, 2)
            oprot.writeListBegin(TType.STRING, len(self.splitKeys))
            for iter294 in self.splitKeys:
                oprot.writeBinary(iter294)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # splitKeys is optional; only tableName is required.
        if self.tableName is None:
            raise TProtocolException(message='Required field tableName is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# thrift_spec: tuple index == Thrift field id.
all_structs.append(isTableAvailableWithSplit_args)
isTableAvailableWithSplit_args.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'tableName', [TTableName, None], None, ),  # 1
    (2, TType.LIST, 'splitKeys', (TType.STRING, 'BINARY', False), None, ),  # 2
)
class isTableAvailableWithSplit_result(object):
    """
    Thrift-generated result struct for the isTableAvailableWithSplit RPC
    (boolean return carried in ``success``).

    Attributes:
     - success
     - io
    """

    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io

    def read(self, iprot):
        """Deserialize this struct from *iprot* (a Thrift input protocol)."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.BOOL:
                    self.success = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)  # unknown field id: skip
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('isTableAvailableWithSplit_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.BOOL, 0)
            oprot.writeBool(self.success)
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# thrift_spec: tuple index == Thrift field id.
all_structs.append(isTableAvailableWithSplit_result)
isTableAvailableWithSplit_result.thrift_spec = (
    (0, TType.BOOL, 'success', None, None, ),  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class addColumnFamily_args(object):
    """
    Thrift argument struct for THBaseService.addColumnFamily.

    Thrift-generated (de)serialization code; do not edit by hand.

    Attributes:
     - tableName: the tablename to add column family to
     - column: column family descriptor of column family to be added
    """

    def __init__(self, tableName=None, column=None,):
        self.tableName = tableName
        self.column = column

    def read(self, iprot):
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.tableName = TTableName()
                    self.tableName.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.column = TColumnFamilyDescriptor()
                    self.column.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('addColumnFamily_args')
        if self.tableName is not None:
            oprot.writeFieldBegin('tableName', TType.STRUCT, 1)
            self.tableName.write(oprot)
            oprot.writeFieldEnd()
        if self.column is not None:
            oprot.writeFieldBegin('column', TType.STRUCT, 2)
            self.column.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Both fields are required by the IDL.
        if self.tableName is None:
            raise TProtocolException(message='Required field tableName is unset!')
        if self.column is None:
            raise TProtocolException(message='Required field column is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(addColumnFamily_args)
addColumnFamily_args.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'tableName', [TTableName, None], None, ),  # 1
    (2, TType.STRUCT, 'column', [TColumnFamilyDescriptor, None], None, ),  # 2
)
class addColumnFamily_result(object):
    """
    Thrift result struct for THBaseService.addColumnFamily.

    Thrift-generated (de)serialization code; do not edit by hand.

    Attributes:
     - io
    """

    def __init__(self, io=None,):
        self.io = io

    def read(self, iprot):
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('addColumnFamily_result')
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(addColumnFamily_result)
addColumnFamily_result.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class deleteColumnFamily_args(object):
    """
    Thrift argument struct for THBaseService.deleteColumnFamily.

    Thrift-generated (de)serialization code; do not edit by hand.

    Attributes:
     - tableName: the tablename to delete column family from
     - column: name of column family to be deleted (raw bytes)
    """

    def __init__(self, tableName=None, column=None,):
        self.tableName = tableName
        self.column = column

    def read(self, iprot):
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.tableName = TTableName()
                    self.tableName.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    # Column family name is declared BINARY -- read raw bytes.
                    self.column = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('deleteColumnFamily_args')
        if self.tableName is not None:
            oprot.writeFieldBegin('tableName', TType.STRUCT, 1)
            self.tableName.write(oprot)
            oprot.writeFieldEnd()
        if self.column is not None:
            oprot.writeFieldBegin('column', TType.STRING, 2)
            oprot.writeBinary(self.column)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Both fields are required by the IDL.
        if self.tableName is None:
            raise TProtocolException(message='Required field tableName is unset!')
        if self.column is None:
            raise TProtocolException(message='Required field column is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(deleteColumnFamily_args)
deleteColumnFamily_args.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'tableName', [TTableName, None], None, ),  # 1
    (2, TType.STRING, 'column', 'BINARY', None, ),  # 2
)
class deleteColumnFamily_result(object):
    """
    Thrift result struct for THBaseService.deleteColumnFamily.

    Thrift-generated (de)serialization code; do not edit by hand.

    Attributes:
     - io
    """

    def __init__(self, io=None,):
        self.io = io

    def read(self, iprot):
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('deleteColumnFamily_result')
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(deleteColumnFamily_result)
deleteColumnFamily_result.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class modifyColumnFamily_args(object):
    """
    Thrift argument struct for THBaseService.modifyColumnFamily.

    Thrift-generated (de)serialization code; do not edit by hand.

    Attributes:
     - tableName: the tablename to modify column family
     - column: column family descriptor of column family to be modified
    """

    def __init__(self, tableName=None, column=None,):
        self.tableName = tableName
        self.column = column

    def read(self, iprot):
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.tableName = TTableName()
                    self.tableName.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.column = TColumnFamilyDescriptor()
                    self.column.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('modifyColumnFamily_args')
        if self.tableName is not None:
            oprot.writeFieldBegin('tableName', TType.STRUCT, 1)
            self.tableName.write(oprot)
            oprot.writeFieldEnd()
        if self.column is not None:
            oprot.writeFieldBegin('column', TType.STRUCT, 2)
            self.column.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Both fields are required by the IDL.
        if self.tableName is None:
            raise TProtocolException(message='Required field tableName is unset!')
        if self.column is None:
            raise TProtocolException(message='Required field column is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(modifyColumnFamily_args)
modifyColumnFamily_args.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'tableName', [TTableName, None], None, ),  # 1
    (2, TType.STRUCT, 'column', [TColumnFamilyDescriptor, None], None, ),  # 2
)
class modifyColumnFamily_result(object):
    """
    Thrift result struct for THBaseService.modifyColumnFamily.

    Thrift-generated (de)serialization code; do not edit by hand.

    Attributes:
     - io
    """

    def __init__(self, io=None,):
        self.io = io

    def read(self, iprot):
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('modifyColumnFamily_result')
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(modifyColumnFamily_result)
modifyColumnFamily_result.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class modifyTable_args(object):
    """
    Thrift argument struct for THBaseService.modifyTable.

    Thrift-generated (de)serialization code; do not edit by hand.

    Attributes:
     - desc: the descriptor of the table to modify
    """

    def __init__(self, desc=None,):
        self.desc = desc

    def read(self, iprot):
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.desc = TTableDescriptor()
                    self.desc.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('modifyTable_args')
        if self.desc is not None:
            oprot.writeFieldBegin('desc', TType.STRUCT, 1)
            self.desc.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # desc is required by the IDL.
        if self.desc is None:
            raise TProtocolException(message='Required field desc is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(modifyTable_args)
modifyTable_args.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'desc', [TTableDescriptor, None], None, ),  # 1
)
class modifyTable_result(object):
    """
    Thrift result struct for THBaseService.modifyTable.

    Thrift-generated (de)serialization code; do not edit by hand.

    Attributes:
     - io
    """

    def __init__(self, io=None,):
        self.io = io

    def read(self, iprot):
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('modifyTable_result')
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(modifyTable_result)
modifyTable_result.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class createNamespace_args(object):
    """
    Thrift argument struct for THBaseService.createNamespace.

    Thrift-generated (de)serialization code; do not edit by hand.

    Attributes:
     - namespaceDesc: descriptor which describes the new namespace
    """

    def __init__(self, namespaceDesc=None,):
        self.namespaceDesc = namespaceDesc

    def read(self, iprot):
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.namespaceDesc = TNamespaceDescriptor()
                    self.namespaceDesc.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('createNamespace_args')
        if self.namespaceDesc is not None:
            oprot.writeFieldBegin('namespaceDesc', TType.STRUCT, 1)
            self.namespaceDesc.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # namespaceDesc is required by the IDL.
        if self.namespaceDesc is None:
            raise TProtocolException(message='Required field namespaceDesc is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(createNamespace_args)
createNamespace_args.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'namespaceDesc', [TNamespaceDescriptor, None], None, ),  # 1
)
class createNamespace_result(object):
    """
    Thrift result struct for THBaseService.createNamespace.

    Thrift-generated (de)serialization code; do not edit by hand.

    Attributes:
     - io
    """

    def __init__(self, io=None,):
        self.io = io

    def read(self, iprot):
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('createNamespace_result')
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(createNamespace_result)
createNamespace_result.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class modifyNamespace_args(object):
    """
    Thrift argument struct for THBaseService.modifyNamespace.

    Thrift-generated (de)serialization code; do not edit by hand.

    Attributes:
     - namespaceDesc: descriptor which describes the new namespace
    """

    def __init__(self, namespaceDesc=None,):
        self.namespaceDesc = namespaceDesc

    def read(self, iprot):
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.namespaceDesc = TNamespaceDescriptor()
                    self.namespaceDesc.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('modifyNamespace_args')
        if self.namespaceDesc is not None:
            oprot.writeFieldBegin('namespaceDesc', TType.STRUCT, 1)
            self.namespaceDesc.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # namespaceDesc is required by the IDL.
        if self.namespaceDesc is None:
            raise TProtocolException(message='Required field namespaceDesc is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(modifyNamespace_args)
modifyNamespace_args.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'namespaceDesc', [TNamespaceDescriptor, None], None, ),  # 1
)
class modifyNamespace_result(object):
    """
    Thrift result struct for THBaseService.modifyNamespace.

    Thrift-generated (de)serialization code; do not edit by hand.

    Attributes:
     - io
    """

    def __init__(self, io=None,):
        self.io = io

    def read(self, iprot):
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('modifyNamespace_result')
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(modifyNamespace_result)
modifyNamespace_result.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class deleteNamespace_args(object):
    """
    Thrift argument struct for THBaseService.deleteNamespace.

    Thrift-generated (de)serialization code; do not edit by hand.

    Attributes:
     - name: namespace name
    """

    def __init__(self, name=None,):
        self.name = name

    def read(self, iprot):
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    # UTF8 field: decode explicitly under Python 2 only.
                    self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('deleteNamespace_args')
        if self.name is not None:
            oprot.writeFieldBegin('name', TType.STRING, 1)
            oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # name is required by the IDL.
        if self.name is None:
            raise TProtocolException(message='Required field name is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(deleteNamespace_args)
deleteNamespace_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'name', 'UTF8', None, ),  # 1
)
class deleteNamespace_result(object):
    """
    Thrift result struct for THBaseService.deleteNamespace.

    Thrift-generated (de)serialization code; do not edit by hand.

    Attributes:
     - io
    """

    def __init__(self, io=None,):
        self.io = io

    def read(self, iprot):
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('deleteNamespace_result')
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(deleteNamespace_result)
deleteNamespace_result.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class getNamespaceDescriptor_args(object):
    """
    Thrift argument struct for THBaseService.getNamespaceDescriptor.

    Thrift-generated (de)serialization code; do not edit by hand.

    Attributes:
     - name: name of namespace descriptor
    """

    def __init__(self, name=None,):
        self.name = name

    def read(self, iprot):
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    # UTF8 field: decode explicitly under Python 2 only.
                    self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getNamespaceDescriptor_args')
        if self.name is not None:
            oprot.writeFieldBegin('name', TType.STRING, 1)
            oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # name is required by the IDL.
        if self.name is None:
            raise TProtocolException(message='Required field name is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(getNamespaceDescriptor_args)
getNamespaceDescriptor_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'name', 'UTF8', None, ),  # 1
)
class getNamespaceDescriptor_result(object):
    """
    Thrift result struct for THBaseService.getNamespaceDescriptor.

    Thrift-generated (de)serialization code; do not edit by hand.

    Attributes:
     - success
     - io
    """

    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io

    def read(self, iprot):
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = TNamespaceDescriptor()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getNamespaceDescriptor_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(getNamespaceDescriptor_result)
getNamespaceDescriptor_result.thrift_spec = (
    (0, TType.STRUCT, 'success', [TNamespaceDescriptor, None], None, ),  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
class listNamespaceDescriptors_args(object):
    """
    Thrift argument struct for THBaseService.listNamespaceDescriptors.

    The call takes no arguments, so this struct has no fields.
    Thrift-generated (de)serialization code; do not edit by hand.
    """

    def read(self, iprot):
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('listNamespaceDescriptors_args')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No fields to validate.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(listNamespaceDescriptors_args)
listNamespaceDescriptors_args.thrift_spec = (
)
class listNamespaceDescriptors_result(object):
    """
    Thrift result struct for THBaseService.listNamespaceDescriptors.

    Thrift-generated (de)serialization code; do not edit by hand.

    Attributes:
     - success
     - io
    """

    def __init__(self, success=None, io=None,):
        self.success = success
        self.io = io

    def read(self, iprot):
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    # success is a list<TNamespaceDescriptor>; read element by element.
                    self.success = []
                    (_etype298, _size295) = iprot.readListBegin()
                    for _i299 in range(_size295):
                        _elem300 = TNamespaceDescriptor()
                        _elem300.read(iprot)
                        self.success.append(_elem300)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.io = TIOError()
                    self.io.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('listNamespaceDescriptors_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for iter301 in self.success:
                iter301.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.io is not None:
            oprot.writeFieldBegin('io', TType.STRUCT, 1)
            self.io.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(listNamespaceDescriptors_result)
listNamespaceDescriptors_result.thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT, [TNamespaceDescriptor, None], False), None, ),  # 0
    (1, TType.STRUCT, 'io', [TIOError, None], None, ),  # 1
)
# Resolve forward/recursive references inside the thrift_spec tuples now that
# every struct class has been registered (see thrift.TRecursive.fix_spec).
fix_spec(all_structs)
del all_structs
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from thrift.transport import TTransport
# Registry of every generated struct class in this module; populated as the
# classes are defined below.
all_structs = []
class TDeleteType(object):
    """
    Kind of delete to perform:
      - DELETE_COLUMN removes exactly one version.
      - DELETE_COLUMNS also removes previous versions.
      - DELETE_FAMILY / DELETE_FAMILY_VERSION operate on a whole column family.
    """

    DELETE_COLUMN = 0
    DELETE_COLUMNS = 1
    DELETE_FAMILY = 2
    DELETE_FAMILY_VERSION = 3

    _VALUES_TO_NAMES = {
        0: "DELETE_COLUMN",
        1: "DELETE_COLUMNS",
        2: "DELETE_FAMILY",
        3: "DELETE_FAMILY_VERSION",
    }

    # Reverse lookup derived from the forward map above.
    _NAMES_TO_VALUES = {name: number for number, name in _VALUES_TO_NAMES.items()}
class TDurability(object):
    """
    Enumerates WAL durability levels for a mutation:
      - SKIP_WAL: do not write the mutation to the WAL,
      - ASYNC_WAL: write to the WAL asynchronously,
      - SYNC_WAL: write to the WAL synchronously,
      - FSYNC_WAL: write synchronously and force the entries to disk.
    """
    USE_DEFAULT = 0
    SKIP_WAL = 1
    ASYNC_WAL = 2
    SYNC_WAL = 3
    FSYNC_WAL = 4

    # Wire value -> symbolic name, keyed by the constants above.
    _VALUES_TO_NAMES = {
        USE_DEFAULT: "USE_DEFAULT",
        SKIP_WAL: "SKIP_WAL",
        ASYNC_WAL: "ASYNC_WAL",
        SYNC_WAL: "SYNC_WAL",
        FSYNC_WAL: "FSYNC_WAL",
    }

    # Symbolic name -> wire value (inverse of the table above).
    _NAMES_TO_VALUES = {
        "USE_DEFAULT": USE_DEFAULT,
        "SKIP_WAL": SKIP_WAL,
        "ASYNC_WAL": ASYNC_WAL,
        "SYNC_WAL": SYNC_WAL,
        "FSYNC_WAL": FSYNC_WAL,
    }
class TConsistency(object):
    """
    Enumerates read-consistency modes:
      - STRONG reads only from the primary region,
      - TIMELINE reads may return values from secondary region replicas.
    """
    STRONG = 1
    TIMELINE = 2

    # Wire value -> symbolic name, keyed by the constants above.
    _VALUES_TO_NAMES = {
        STRONG: "STRONG",
        TIMELINE: "TIMELINE",
    }

    # Symbolic name -> wire value (inverse of the table above).
    _NAMES_TO_VALUES = {
        "STRONG": STRONG,
        "TIMELINE": TIMELINE,
    }
class TReadType(object):
    """Enumerates scan read types (note: values start at 1, not 0)."""
    DEFAULT = 1
    STREAM = 2
    PREAD = 3

    # Wire value -> symbolic name, keyed by the constants above.
    _VALUES_TO_NAMES = {
        DEFAULT: "DEFAULT",
        STREAM: "STREAM",
        PREAD: "PREAD",
    }

    # Symbolic name -> wire value (inverse of the table above).
    _NAMES_TO_VALUES = {
        "DEFAULT": DEFAULT,
        "STREAM": STREAM,
        "PREAD": PREAD,
    }
class TCompareOp(object):
    """
    Thrift wrapper around
    org.apache.hadoop.hbase.filter.CompareFilter$CompareOp.
    """
    LESS = 0
    LESS_OR_EQUAL = 1
    EQUAL = 2
    NOT_EQUAL = 3
    GREATER_OR_EQUAL = 4
    GREATER = 5
    NO_OP = 6

    # Wire value -> symbolic name, keyed by the constants above.
    _VALUES_TO_NAMES = {
        LESS: "LESS",
        LESS_OR_EQUAL: "LESS_OR_EQUAL",
        EQUAL: "EQUAL",
        NOT_EQUAL: "NOT_EQUAL",
        GREATER_OR_EQUAL: "GREATER_OR_EQUAL",
        GREATER: "GREATER",
        NO_OP: "NO_OP",
    }

    # Symbolic name -> wire value (inverse of the table above).
    _NAMES_TO_VALUES = {
        "LESS": LESS,
        "LESS_OR_EQUAL": LESS_OR_EQUAL,
        "EQUAL": EQUAL,
        "NOT_EQUAL": NOT_EQUAL,
        "GREATER_OR_EQUAL": GREATER_OR_EQUAL,
        "GREATER": GREATER,
        "NO_OP": NO_OP,
    }
class TBloomFilterType(object):
    """
    Thrift wrapper around
    org.apache.hadoop.hbase.regionserver.BloomType
    """
    NONE = 0
    ROW = 1
    ROWCOL = 2
    ROWPREFIX_FIXED_LENGTH = 3

    # Wire value -> symbolic name, keyed by the constants above.
    _VALUES_TO_NAMES = {
        NONE: "NONE",
        ROW: "ROW",
        ROWCOL: "ROWCOL",
        ROWPREFIX_FIXED_LENGTH: "ROWPREFIX_FIXED_LENGTH",
    }

    # Symbolic name -> wire value (inverse of the table above).
    _NAMES_TO_VALUES = {
        "NONE": NONE,
        "ROW": ROW,
        "ROWCOL": ROWCOL,
        "ROWPREFIX_FIXED_LENGTH": ROWPREFIX_FIXED_LENGTH,
    }
class TCompressionAlgorithm(object):
    """
    Thrift wrapper around
    org.apache.hadoop.hbase.io.compress.Algorithm
    """
    LZO = 0
    GZ = 1
    NONE = 2
    SNAPPY = 3
    LZ4 = 4
    BZIP2 = 5
    ZSTD = 6

    # Wire value -> symbolic name, keyed by the constants above.
    _VALUES_TO_NAMES = {
        LZO: "LZO",
        GZ: "GZ",
        NONE: "NONE",
        SNAPPY: "SNAPPY",
        LZ4: "LZ4",
        BZIP2: "BZIP2",
        ZSTD: "ZSTD",
    }

    # Symbolic name -> wire value (inverse of the table above).
    _NAMES_TO_VALUES = {
        "LZO": LZO,
        "GZ": GZ,
        "NONE": NONE,
        "SNAPPY": SNAPPY,
        "LZ4": LZ4,
        "BZIP2": BZIP2,
        "ZSTD": ZSTD,
    }
class TDataBlockEncoding(object):
    """
    Thrift wrapper around
    org.apache.hadoop.hbase.io.encoding.DataBlockEncoding

    Note the gaps in the value space (1, 5, 6 are unused) — they mirror
    encodings not exposed through this Thrift interface.
    """
    NONE = 0
    PREFIX = 2
    DIFF = 3
    FAST_DIFF = 4
    ROW_INDEX_V1 = 7

    # Wire value -> symbolic name, keyed by the constants above.
    _VALUES_TO_NAMES = {
        NONE: "NONE",
        PREFIX: "PREFIX",
        DIFF: "DIFF",
        FAST_DIFF: "FAST_DIFF",
        ROW_INDEX_V1: "ROW_INDEX_V1",
    }

    # Symbolic name -> wire value (inverse of the table above).
    _NAMES_TO_VALUES = {
        "NONE": NONE,
        "PREFIX": PREFIX,
        "DIFF": DIFF,
        "FAST_DIFF": FAST_DIFF,
        "ROW_INDEX_V1": ROW_INDEX_V1,
    }
class TKeepDeletedCells(object):
    """
    Thrift wrapper around
    org.apache.hadoop.hbase.KeepDeletedCells
    """
    FALSE = 0
    TRUE = 1
    TTL = 2

    # Wire value -> symbolic name, keyed by the constants above.
    _VALUES_TO_NAMES = {
        FALSE: "FALSE",
        TRUE: "TRUE",
        TTL: "TTL",
    }

    # Symbolic name -> wire value (inverse of the table above).
    _NAMES_TO_VALUES = {
        "FALSE": FALSE,
        "TRUE": TRUE,
        "TTL": TTL,
    }
class TTimeRange(object):
    """
    Timestamp interval used to scope reads.

    Attributes:
     - minStamp: lower bound of the range (required)
     - maxStamp: upper bound of the range (required)
    """

    def __init__(self, minStamp=None, maxStamp=None,):
        self.minStamp = minStamp
        self.maxStamp = maxStamp

    def read(self, iprot):
        # Delegate to the accelerated C decoder when available.
        can_fast = (iprot._fast_decode is not None
                    and isinstance(iprot.trans, TTransport.CReadableTransport)
                    and self.thrift_spec is not None)
        if can_fast:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.I64:
                self.minStamp = iprot.readI64()
            elif fid == 2 and ftype == TType.I64:
                self.maxStamp = iprot.readI64()
            else:
                # Unknown field id or unexpected wire type: skip payload.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Delegate to the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TTimeRange')
        for fid, fname, value in ((1, 'minStamp', self.minStamp),
                                  (2, 'maxStamp', self.maxStamp)):
            if value is None:
                continue
            oprot.writeFieldBegin(fname, TType.I64, fid)
            oprot.writeI64(value)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        for req in ('minStamp', 'maxStamp'):
            if getattr(self, req) is None:
                raise TProtocolException(message='Required field %s is unset!' % req)
        return

    def __repr__(self):
        pairs = ('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TColumn(object):
    """
    Addresses a single cell or multiple cells in an HBase table by column
    family and, optionally, a column qualifier and timestamp.

    Attributes:
     - family: column family name (required)
     - qualifier: column qualifier (optional)
     - timestamp: cell timestamp (optional)
    """

    def __init__(self, family=None, qualifier=None, timestamp=None,):
        self.family = family
        self.qualifier = qualifier
        self.timestamp = timestamp

    def read(self, iprot):
        # Delegate to the accelerated C decoder when available.
        can_fast = (iprot._fast_decode is not None
                    and isinstance(iprot.trans, TTransport.CReadableTransport)
                    and self.thrift_spec is not None)
        if can_fast:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.STRING:
                self.family = iprot.readBinary()
            elif fid == 2 and ftype == TType.STRING:
                self.qualifier = iprot.readBinary()
            elif fid == 3 and ftype == TType.I64:
                self.timestamp = iprot.readI64()
            else:
                # Unknown field id or unexpected wire type: skip payload.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Delegate to the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TColumn')
        for fid, fname, ttype, writer, value in (
                (1, 'family', TType.STRING, oprot.writeBinary, self.family),
                (2, 'qualifier', TType.STRING, oprot.writeBinary, self.qualifier),
                (3, 'timestamp', TType.I64, oprot.writeI64, self.timestamp)):
            if value is None:
                continue
            oprot.writeFieldBegin(fname, ttype, fid)
            writer(value)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        if self.family is None:
            raise TProtocolException(message='Required field family is unset!')
        return

    def __repr__(self):
        pairs = ('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TColumnValue(object):
    """
    Represents a single cell and its value.

    Attributes:
     - family: column family name (required)
     - qualifier: column qualifier (required)
     - value: cell value (required)
     - timestamp: cell timestamp (optional)
     - tags: serialized cell tags (optional)
     - type: cell type code, single byte (optional)
    """

    def __init__(self, family=None, qualifier=None, value=None, timestamp=None, tags=None, type=None,):
        self.family = family
        self.qualifier = qualifier
        self.value = value
        self.timestamp = timestamp
        self.tags = tags
        self.type = type

    def read(self, iprot):
        # Delegate to the accelerated C decoder when the transport and the
        # class's thrift_spec (assigned after class creation) support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: dispatch on field id, skipping unknown/mistyped fields.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.family = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.qualifier = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.value = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.I64:
                    self.timestamp = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.STRING:
                    self.tags = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.BYTE:
                    self.type = iprot.readByte()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Delegate to the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        # Emit only the fields that are set; ids must match thrift_spec.
        oprot.writeStructBegin('TColumnValue')
        if self.family is not None:
            oprot.writeFieldBegin('family', TType.STRING, 1)
            oprot.writeBinary(self.family)
            oprot.writeFieldEnd()
        if self.qualifier is not None:
            oprot.writeFieldBegin('qualifier', TType.STRING, 2)
            oprot.writeBinary(self.qualifier)
            oprot.writeFieldEnd()
        if self.value is not None:
            oprot.writeFieldBegin('value', TType.STRING, 3)
            oprot.writeBinary(self.value)
            oprot.writeFieldEnd()
        if self.timestamp is not None:
            oprot.writeFieldBegin('timestamp', TType.I64, 4)
            oprot.writeI64(self.timestamp)
            oprot.writeFieldEnd()
        if self.tags is not None:
            oprot.writeFieldBegin('tags', TType.STRING, 5)
            oprot.writeBinary(self.tags)
            oprot.writeFieldEnd()
        if self.type is not None:
            oprot.writeFieldBegin('type', TType.BYTE, 6)
            oprot.writeByte(self.type)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # family, qualifier and value are required by the IDL.
        if self.family is None:
            raise TProtocolException(message='Required field family is unset!')
        if self.qualifier is None:
            raise TProtocolException(message='Required field qualifier is unset!')
        if self.value is None:
            raise TProtocolException(message='Required field value is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TColumnIncrement(object):
    """
    Represents a single cell and the amount to increment it by.

    Attributes:
     - family: column family name (required)
     - qualifier: column qualifier (required)
     - amount: increment delta, defaults to 1
    """

    def __init__(self, family=None, qualifier=None, amount=1,):
        self.family = family
        self.qualifier = qualifier
        self.amount = amount

    def read(self, iprot):
        # Delegate to the accelerated C decoder when available.
        can_fast = (iprot._fast_decode is not None
                    and isinstance(iprot.trans, TTransport.CReadableTransport)
                    and self.thrift_spec is not None)
        if can_fast:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.STRING:
                self.family = iprot.readBinary()
            elif fid == 2 and ftype == TType.STRING:
                self.qualifier = iprot.readBinary()
            elif fid == 3 and ftype == TType.I64:
                self.amount = iprot.readI64()
            else:
                # Unknown field id or unexpected wire type: skip payload.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Delegate to the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TColumnIncrement')
        for fid, fname, ttype, writer, value in (
                (1, 'family', TType.STRING, oprot.writeBinary, self.family),
                (2, 'qualifier', TType.STRING, oprot.writeBinary, self.qualifier),
                (3, 'amount', TType.I64, oprot.writeI64, self.amount)):
            if value is None:
                continue
            oprot.writeFieldBegin(fname, ttype, fid)
            writer(value)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        for req in ('family', 'qualifier'):
            if getattr(self, req) is None:
                raise TProtocolException(message='Required field %s is unset!' % req)
        return

    def __repr__(self):
        pairs = ('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TResult(object):
    """
    Result of a read; if no Result is found, row and columnValues
    will not be set.

    Attributes:
     - row: row key (optional)
     - columnValues: list of TColumnValue (required)
     - stale: True when served from a secondary replica (default False)
     - partial: True when this is a partial row result (default False)
    """

    def __init__(self, row=None, columnValues=None, stale=False, partial=False,):
        self.row = row
        self.columnValues = columnValues
        self.stale = stale
        self.partial = partial

    def read(self, iprot):
        # Delegate to the accelerated C decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.row = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    # Each list element is a serialized TColumnValue struct.
                    self.columnValues = []
                    (_etype3, _size0) = iprot.readListBegin()
                    for _i4 in range(_size0):
                        _elem5 = TColumnValue()
                        _elem5.read(iprot)
                        self.columnValues.append(_elem5)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.BOOL:
                    self.stale = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.BOOL:
                    self.partial = iprot.readBool()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Delegate to the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TResult')
        if self.row is not None:
            oprot.writeFieldBegin('row', TType.STRING, 1)
            oprot.writeBinary(self.row)
            oprot.writeFieldEnd()
        if self.columnValues is not None:
            oprot.writeFieldBegin('columnValues', TType.LIST, 2)
            oprot.writeListBegin(TType.STRUCT, len(self.columnValues))
            for iter6 in self.columnValues:
                iter6.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.stale is not None:
            oprot.writeFieldBegin('stale', TType.BOOL, 3)
            oprot.writeBool(self.stale)
            oprot.writeFieldEnd()
        if self.partial is not None:
            oprot.writeFieldBegin('partial', TType.BOOL, 4)
            oprot.writeBool(self.partial)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Only columnValues is required; row may legitimately be unset.
        if self.columnValues is None:
            raise TProtocolException(message='Required field columnValues is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TAuthorization(object):
    """
    Set of visibility labels attached to a read.

    Attributes:
     - labels: list of label strings (optional)
    """

    def __init__(self, labels=None,):
        self.labels = labels

    def read(self, iprot):
        # Delegate to the accelerated C decoder when available.
        can_fast = (iprot._fast_decode is not None
                    and isinstance(iprot.trans, TTransport.CReadableTransport)
                    and self.thrift_spec is not None)
        if can_fast:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.LIST:
                self.labels = []
                _, size = iprot.readListBegin()
                for _idx in range(size):
                    label = iprot.readString()
                    # On Python 2 readString yields bytes; normalize to text.
                    if sys.version_info[0] == 2:
                        label = label.decode('utf-8')
                    self.labels.append(label)
                iprot.readListEnd()
            else:
                # Unknown field id or unexpected wire type: skip payload.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Delegate to the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TAuthorization')
        if self.labels is not None:
            oprot.writeFieldBegin('labels', TType.LIST, 1)
            oprot.writeListBegin(TType.STRING, len(self.labels))
            needs_encode = sys.version_info[0] == 2
            for label in self.labels:
                oprot.writeString(label.encode('utf-8') if needs_encode else label)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # All fields are optional; nothing to enforce.
        return

    def __repr__(self):
        pairs = ('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TCellVisibility(object):
    """
    Visibility expression attached to a mutation's cells.

    Attributes:
     - expression: visibility expression string (optional)
    """

    def __init__(self, expression=None,):
        self.expression = expression

    def read(self, iprot):
        # Delegate to the accelerated C decoder when available.
        can_fast = (iprot._fast_decode is not None
                    and isinstance(iprot.trans, TTransport.CReadableTransport)
                    and self.thrift_spec is not None)
        if can_fast:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.STRING:
                text = iprot.readString()
                # On Python 2 readString yields bytes; normalize to text.
                if sys.version_info[0] == 2:
                    text = text.decode('utf-8')
                self.expression = text
            else:
                # Unknown field id or unexpected wire type: skip payload.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Delegate to the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TCellVisibility')
        if self.expression is not None:
            oprot.writeFieldBegin('expression', TType.STRING, 1)
            out = self.expression
            if sys.version_info[0] == 2:
                out = out.encode('utf-8')
            oprot.writeString(out)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # All fields are optional; nothing to enforce.
        return

    def __repr__(self):
        pairs = ('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TGet(object):
    """
    Used to perform Get operations on a single row.

    The scope can be further narrowed down by specifying a list of
    columns or column families.

    To get everything for a row, instantiate a Get object with just the row to get.
    To further define the scope of what to get you can add a timestamp or time range
    with an optional maximum number of versions to return.

    If you specify a time range and a timestamp the range is ignored.
    Timestamps on TColumns are ignored.

    Attributes:
     - row: row key (required)
     - columns: list of TColumn restricting the scope (optional)
     - timestamp: exact timestamp to read (optional)
     - timeRange: TTimeRange to read (optional; ignored when timestamp set)
     - maxVersions: max number of versions to return (optional)
     - filterString: serialized filter expression (optional)
     - attributes: operation attribute map (optional)
     - authorizations: TAuthorization visibility labels (optional)
     - consistency: TConsistency value (optional)
     - targetReplicaId: replica to target (optional)
     - cacheBlocks: whether to cache blocks for this get (optional)
     - storeLimit: max cells per column family (optional)
     - storeOffset: cell offset per column family (optional)
     - existence_only: only check existence, do not return data (optional)
     - filterBytes: serialized filter (optional)
    """

    def __init__(self, row=None, columns=None, timestamp=None, timeRange=None, maxVersions=None, filterString=None, attributes=None, authorizations=None, consistency=None, targetReplicaId=None, cacheBlocks=None, storeLimit=None, storeOffset=None, existence_only=None, filterBytes=None,):
        self.row = row
        self.columns = columns
        self.timestamp = timestamp
        self.timeRange = timeRange
        self.maxVersions = maxVersions
        self.filterString = filterString
        self.attributes = attributes
        self.authorizations = authorizations
        self.consistency = consistency
        self.targetReplicaId = targetReplicaId
        self.cacheBlocks = cacheBlocks
        self.storeLimit = storeLimit
        self.storeOffset = storeOffset
        self.existence_only = existence_only
        self.filterBytes = filterBytes

    def read(self, iprot):
        # Delegate to the accelerated C decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: dispatch on field id, skipping unknown/mistyped fields.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.row = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    self.columns = []
                    (_etype17, _size14) = iprot.readListBegin()
                    for _i18 in range(_size14):
                        _elem19 = TColumn()
                        _elem19.read(iprot)
                        self.columns.append(_elem19)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.I64:
                    self.timestamp = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.STRUCT:
                    self.timeRange = TTimeRange()
                    self.timeRange.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.I32:
                    self.maxVersions = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.STRING:
                    self.filterString = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 7:
                if ftype == TType.MAP:
                    # Binary-keyed, binary-valued attribute map.
                    self.attributes = {}
                    (_ktype21, _vtype22, _size20) = iprot.readMapBegin()
                    for _i24 in range(_size20):
                        _key25 = iprot.readBinary()
                        _val26 = iprot.readBinary()
                        self.attributes[_key25] = _val26
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 8:
                if ftype == TType.STRUCT:
                    self.authorizations = TAuthorization()
                    self.authorizations.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 9:
                if ftype == TType.I32:
                    self.consistency = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 10:
                if ftype == TType.I32:
                    self.targetReplicaId = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 11:
                if ftype == TType.BOOL:
                    self.cacheBlocks = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 12:
                if ftype == TType.I32:
                    self.storeLimit = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 13:
                if ftype == TType.I32:
                    self.storeOffset = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 14:
                if ftype == TType.BOOL:
                    self.existence_only = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 15:
                if ftype == TType.STRING:
                    self.filterBytes = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Delegate to the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        # Emit only the fields that are set; ids must match thrift_spec.
        oprot.writeStructBegin('TGet')
        if self.row is not None:
            oprot.writeFieldBegin('row', TType.STRING, 1)
            oprot.writeBinary(self.row)
            oprot.writeFieldEnd()
        if self.columns is not None:
            oprot.writeFieldBegin('columns', TType.LIST, 2)
            oprot.writeListBegin(TType.STRUCT, len(self.columns))
            for iter27 in self.columns:
                iter27.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.timestamp is not None:
            oprot.writeFieldBegin('timestamp', TType.I64, 3)
            oprot.writeI64(self.timestamp)
            oprot.writeFieldEnd()
        if self.timeRange is not None:
            oprot.writeFieldBegin('timeRange', TType.STRUCT, 4)
            self.timeRange.write(oprot)
            oprot.writeFieldEnd()
        if self.maxVersions is not None:
            oprot.writeFieldBegin('maxVersions', TType.I32, 5)
            oprot.writeI32(self.maxVersions)
            oprot.writeFieldEnd()
        if self.filterString is not None:
            oprot.writeFieldBegin('filterString', TType.STRING, 6)
            oprot.writeBinary(self.filterString)
            oprot.writeFieldEnd()
        if self.attributes is not None:
            oprot.writeFieldBegin('attributes', TType.MAP, 7)
            oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
            for kiter28, viter29 in self.attributes.items():
                oprot.writeBinary(kiter28)
                oprot.writeBinary(viter29)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        if self.authorizations is not None:
            oprot.writeFieldBegin('authorizations', TType.STRUCT, 8)
            self.authorizations.write(oprot)
            oprot.writeFieldEnd()
        if self.consistency is not None:
            oprot.writeFieldBegin('consistency', TType.I32, 9)
            oprot.writeI32(self.consistency)
            oprot.writeFieldEnd()
        if self.targetReplicaId is not None:
            oprot.writeFieldBegin('targetReplicaId', TType.I32, 10)
            oprot.writeI32(self.targetReplicaId)
            oprot.writeFieldEnd()
        if self.cacheBlocks is not None:
            oprot.writeFieldBegin('cacheBlocks', TType.BOOL, 11)
            oprot.writeBool(self.cacheBlocks)
            oprot.writeFieldEnd()
        if self.storeLimit is not None:
            oprot.writeFieldBegin('storeLimit', TType.I32, 12)
            oprot.writeI32(self.storeLimit)
            oprot.writeFieldEnd()
        if self.storeOffset is not None:
            oprot.writeFieldBegin('storeOffset', TType.I32, 13)
            oprot.writeI32(self.storeOffset)
            oprot.writeFieldEnd()
        if self.existence_only is not None:
            oprot.writeFieldBegin('existence_only', TType.BOOL, 14)
            oprot.writeBool(self.existence_only)
            oprot.writeFieldEnd()
        if self.filterBytes is not None:
            oprot.writeFieldBegin('filterBytes', TType.STRING, 15)
            oprot.writeBinary(self.filterBytes)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Only the row key is required by the IDL.
        if self.row is None:
            raise TProtocolException(message='Required field row is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TPut(object):
    """
    Used to perform Put operations for a single row.

    Add column values to this object and they'll be added.
    You can provide a default timestamp if the column values
    don't have one. If you don't provide a default timestamp
    the current time is inserted.

    You can specify how this Put should be written to the write-ahead Log (WAL)
    by changing the durability. If you don't provide durability, it defaults to
    column family's default setting for durability.

    Attributes:
     - row: row key (required)
     - columnValues: list of TColumnValue to write (required)
     - timestamp: default timestamp for values without one (optional)
     - attributes: operation attribute map (optional)
     - durability: TDurability value (optional)
     - cellVisibility: TCellVisibility expression (optional)
    """

    def __init__(self, row=None, columnValues=None, timestamp=None, attributes=None, durability=None, cellVisibility=None,):
        self.row = row
        self.columnValues = columnValues
        self.timestamp = timestamp
        self.attributes = attributes
        self.durability = durability
        self.cellVisibility = cellVisibility

    def read(self, iprot):
        # Delegate to the accelerated C decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: dispatch on field id. Note field id 4 is unused in the
        # IDL, so the dispatch jumps from 3 to 5.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.row = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    self.columnValues = []
                    (_etype33, _size30) = iprot.readListBegin()
                    for _i34 in range(_size30):
                        _elem35 = TColumnValue()
                        _elem35.read(iprot)
                        self.columnValues.append(_elem35)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.I64:
                    self.timestamp = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.MAP:
                    self.attributes = {}
                    (_ktype37, _vtype38, _size36) = iprot.readMapBegin()
                    for _i40 in range(_size36):
                        _key41 = iprot.readBinary()
                        _val42 = iprot.readBinary()
                        self.attributes[_key41] = _val42
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.I32:
                    self.durability = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 7:
                if ftype == TType.STRUCT:
                    self.cellVisibility = TCellVisibility()
                    self.cellVisibility.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Delegate to the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        # Emit only the fields that are set; ids must match thrift_spec.
        oprot.writeStructBegin('TPut')
        if self.row is not None:
            oprot.writeFieldBegin('row', TType.STRING, 1)
            oprot.writeBinary(self.row)
            oprot.writeFieldEnd()
        if self.columnValues is not None:
            oprot.writeFieldBegin('columnValues', TType.LIST, 2)
            oprot.writeListBegin(TType.STRUCT, len(self.columnValues))
            for iter43 in self.columnValues:
                iter43.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.timestamp is not None:
            oprot.writeFieldBegin('timestamp', TType.I64, 3)
            oprot.writeI64(self.timestamp)
            oprot.writeFieldEnd()
        if self.attributes is not None:
            oprot.writeFieldBegin('attributes', TType.MAP, 5)
            oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
            for kiter44, viter45 in self.attributes.items():
                oprot.writeBinary(kiter44)
                oprot.writeBinary(viter45)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        if self.durability is not None:
            oprot.writeFieldBegin('durability', TType.I32, 6)
            oprot.writeI32(self.durability)
            oprot.writeFieldEnd()
        if self.cellVisibility is not None:
            oprot.writeFieldBegin('cellVisibility', TType.STRUCT, 7)
            self.cellVisibility.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # row and columnValues are required by the IDL.
        if self.row is None:
            raise TProtocolException(message='Required field row is unset!')
        if self.columnValues is None:
            raise TProtocolException(message='Required field columnValues is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TDelete(object):
    """
    Used to perform Delete operations on a single row.

    The scope can be further narrowed down by specifying a list of
    columns or column families as TColumns.

    Specifying only a family in a TColumn will delete the whole family.
    If a timestamp is specified all versions with a timestamp less than
    or equal to this will be deleted. If no timestamp is specified the
    current time will be used.

    Specifying a family and a column qualifier in a TColumn will delete only
    this qualifier. If a timestamp is specified only versions equal
    to this timestamp will be deleted. If no timestamp is specified the
    most recent version will be deleted. To delete all previous versions,
    specify the DELETE_COLUMNS TDeleteType.

    The top level timestamp is only used if a complete row should be deleted
    (i.e. no columns are passed) and if it is specified it works the same way
    as if you had added a TColumn for every column family and this timestamp
    (i.e. all versions older than or equal in all column families will be deleted)

    You can specify how this Delete should be written to the write-ahead Log (WAL)
    by changing the durability. If you don't provide durability, it defaults to
    column family's default setting for durability.

    Attributes:
     - row: row key (required)
     - columns: list of TColumn scoping the delete (optional)
     - timestamp: top-level timestamp (optional, see above)
     - deleteType: TDeleteType value, defaults to 1 (DELETE_COLUMNS)
     - attributes: operation attribute map (optional)
     - durability: TDurability value (optional)
    """

    def __init__(self, row=None, columns=None, timestamp=None, deleteType=1, attributes=None, durability=None,):
        self.row = row
        self.columns = columns
        self.timestamp = timestamp
        self.deleteType = deleteType
        self.attributes = attributes
        self.durability = durability

    def read(self, iprot):
        # Delegate to the accelerated C decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: dispatch on field id. Note field id 5 is unused in the
        # IDL, so the dispatch jumps from 4 to 6.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.row = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    self.columns = []
                    (_etype49, _size46) = iprot.readListBegin()
                    for _i50 in range(_size46):
                        _elem51 = TColumn()
                        _elem51.read(iprot)
                        self.columns.append(_elem51)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.I64:
                    self.timestamp = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.I32:
                    self.deleteType = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.MAP:
                    self.attributes = {}
                    (_ktype53, _vtype54, _size52) = iprot.readMapBegin()
                    for _i56 in range(_size52):
                        _key57 = iprot.readBinary()
                        _val58 = iprot.readBinary()
                        self.attributes[_key57] = _val58
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 7:
                if ftype == TType.I32:
                    self.durability = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Delegate to the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        # Emit only the fields that are set; ids must match thrift_spec.
        oprot.writeStructBegin('TDelete')
        if self.row is not None:
            oprot.writeFieldBegin('row', TType.STRING, 1)
            oprot.writeBinary(self.row)
            oprot.writeFieldEnd()
        if self.columns is not None:
            oprot.writeFieldBegin('columns', TType.LIST, 2)
            oprot.writeListBegin(TType.STRUCT, len(self.columns))
            for iter59 in self.columns:
                iter59.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.timestamp is not None:
            oprot.writeFieldBegin('timestamp', TType.I64, 3)
            oprot.writeI64(self.timestamp)
            oprot.writeFieldEnd()
        if self.deleteType is not None:
            oprot.writeFieldBegin('deleteType', TType.I32, 4)
            oprot.writeI32(self.deleteType)
            oprot.writeFieldEnd()
        if self.attributes is not None:
            oprot.writeFieldBegin('attributes', TType.MAP, 6)
            oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
            for kiter60, viter61 in self.attributes.items():
                oprot.writeBinary(kiter60)
                oprot.writeBinary(viter61)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        if self.durability is not None:
            oprot.writeFieldBegin('durability', TType.I32, 7)
            oprot.writeI32(self.durability)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Only the row key is required by the IDL.
        if self.row is None:
            raise TProtocolException(message='Required field row is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TIncrement(object):
    """
    Used to perform Increment operations for a single row.

    You can specify how this Increment should be written to the write-ahead Log (WAL)
    by changing the durability. If you don't provide durability, it defaults to the
    column family's default setting for durability.

    Attributes:
     - row: row key (binary); required
     - columns: list of TColumnIncrement; required
     - attributes: operation attributes (binary -> binary map)
     - durability: durability setting (i32 enum value)
     - cellVisibility: TCellVisibility struct
     - returnResults: bool flag
    """

    def __init__(self, row=None, columns=None, attributes=None, durability=None, cellVisibility=None, returnResults=None,):
        self.row = row
        self.columns = columns
        self.attributes = attributes
        self.durability = durability
        self.cellVisibility = cellVisibility
        self.returnResults = returnResults

    def read(self, iprot):
        """Deserialize this struct from *iprot* (generated Thrift decoder)."""
        # Fast path: accelerated C decoder, when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:  # row (binary)
                if ftype == TType.STRING:
                    self.row = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 2:  # columns (list<TColumnIncrement>)
                if ftype == TType.LIST:
                    self.columns = []
                    (_etype65, _size62) = iprot.readListBegin()
                    for _i66 in range(_size62):
                        _elem67 = TColumnIncrement()
                        _elem67.read(iprot)
                        self.columns.append(_elem67)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 4:  # attributes (map<binary, binary>)
                if ftype == TType.MAP:
                    self.attributes = {}
                    (_ktype69, _vtype70, _size68) = iprot.readMapBegin()
                    for _i72 in range(_size68):
                        _key73 = iprot.readBinary()
                        _val74 = iprot.readBinary()
                        self.attributes[_key73] = _val74
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 5:  # durability (i32)
                if ftype == TType.I32:
                    self.durability = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 6:  # cellVisibility (struct)
                if ftype == TType.STRUCT:
                    self.cellVisibility = TCellVisibility()
                    self.cellVisibility.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 7:  # returnResults (bool)
                if ftype == TType.BOOL:
                    self.returnResults = iprot.readBool()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; only non-None fields are emitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TIncrement')
        if self.row is not None:
            oprot.writeFieldBegin('row', TType.STRING, 1)
            oprot.writeBinary(self.row)
            oprot.writeFieldEnd()
        if self.columns is not None:
            oprot.writeFieldBegin('columns', TType.LIST, 2)
            oprot.writeListBegin(TType.STRUCT, len(self.columns))
            for iter75 in self.columns:
                iter75.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.attributes is not None:
            oprot.writeFieldBegin('attributes', TType.MAP, 4)
            oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
            for kiter76, viter77 in self.attributes.items():
                oprot.writeBinary(kiter76)
                oprot.writeBinary(viter77)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        if self.durability is not None:
            oprot.writeFieldBegin('durability', TType.I32, 5)
            oprot.writeI32(self.durability)
            oprot.writeFieldEnd()
        if self.cellVisibility is not None:
            oprot.writeFieldBegin('cellVisibility', TType.STRUCT, 6)
            self.cellVisibility.write(oprot)
            oprot.writeFieldEnd()
        if self.returnResults is not None:
            oprot.writeFieldBegin('returnResults', TType.BOOL, 7)
            oprot.writeBool(self.returnResults)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Check that the required fields (row, columns) are set."""
        if self.row is None:
            raise TProtocolException(message='Required field row is unset!')
        if self.columns is None:
            raise TProtocolException(message='Required field columns is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TAppend(object):
    """
    Thrift-generated struct describing an Append operation on a single row.

    Attributes:
     - row: row key (binary); required
     - columns: list of TColumnValue; required
     - attributes: operation attributes (binary -> binary map)
     - durability: durability setting (i32 enum value)
     - cellVisibility: TCellVisibility struct
     - returnResults: bool flag
    """

    def __init__(self, row=None, columns=None, attributes=None, durability=None, cellVisibility=None, returnResults=None,):
        self.row = row
        self.columns = columns
        self.attributes = attributes
        self.durability = durability
        self.cellVisibility = cellVisibility
        self.returnResults = returnResults

    def read(self, iprot):
        """Deserialize this struct from *iprot* (generated Thrift decoder)."""
        # Fast path: accelerated C decoder, when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:  # row (binary)
                if ftype == TType.STRING:
                    self.row = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 2:  # columns (list<TColumnValue>)
                if ftype == TType.LIST:
                    self.columns = []
                    (_etype81, _size78) = iprot.readListBegin()
                    for _i82 in range(_size78):
                        _elem83 = TColumnValue()
                        _elem83.read(iprot)
                        self.columns.append(_elem83)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 3:  # attributes (map<binary, binary>)
                if ftype == TType.MAP:
                    self.attributes = {}
                    (_ktype85, _vtype86, _size84) = iprot.readMapBegin()
                    for _i88 in range(_size84):
                        _key89 = iprot.readBinary()
                        _val90 = iprot.readBinary()
                        self.attributes[_key89] = _val90
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 4:  # durability (i32)
                if ftype == TType.I32:
                    self.durability = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 5:  # cellVisibility (struct)
                if ftype == TType.STRUCT:
                    self.cellVisibility = TCellVisibility()
                    self.cellVisibility.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 6:  # returnResults (bool)
                if ftype == TType.BOOL:
                    self.returnResults = iprot.readBool()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; only non-None fields are emitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TAppend')
        if self.row is not None:
            oprot.writeFieldBegin('row', TType.STRING, 1)
            oprot.writeBinary(self.row)
            oprot.writeFieldEnd()
        if self.columns is not None:
            oprot.writeFieldBegin('columns', TType.LIST, 2)
            oprot.writeListBegin(TType.STRUCT, len(self.columns))
            for iter91 in self.columns:
                iter91.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.attributes is not None:
            oprot.writeFieldBegin('attributes', TType.MAP, 3)
            oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
            for kiter92, viter93 in self.attributes.items():
                oprot.writeBinary(kiter92)
                oprot.writeBinary(viter93)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        if self.durability is not None:
            oprot.writeFieldBegin('durability', TType.I32, 4)
            oprot.writeI32(self.durability)
            oprot.writeFieldEnd()
        if self.cellVisibility is not None:
            oprot.writeFieldBegin('cellVisibility', TType.STRUCT, 5)
            self.cellVisibility.write(oprot)
            oprot.writeFieldEnd()
        if self.returnResults is not None:
            oprot.writeFieldBegin('returnResults', TType.BOOL, 6)
            oprot.writeBool(self.returnResults)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Check that the required fields (row, columns) are set."""
        if self.row is None:
            raise TProtocolException(message='Required field row is unset!')
        if self.columns is None:
            raise TProtocolException(message='Required field columns is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TScan(object):
    """
    Scan parameters for a table scan.

    Any timestamps in the columns are ignored; use timeRange (or the per-family
    colFamTimeRangeMap) to select by timestamp. Max versions defaults to 1.

    Attributes:
     - startRow: first row key of the scan (binary)
     - stopRow: exclusive end row key (binary)
     - columns: list of TColumn to scan
     - caching: i32
     - maxVersions: i32, defaults to 1
     - timeRange: TTimeRange struct
     - filterString: filter expression (binary)
     - batchSize: i32
     - attributes: operation attributes (binary -> binary map)
     - authorizations: TAuthorization struct
     - reversed: bool (NOTE: generated parameter name shadows the builtin)
     - cacheBlocks: bool
     - colFamTimeRangeMap: map of binary -> TTimeRange
     - readType: i32
     - limit: i32
     - consistency: i32
     - targetReplicaId: i32
     - filterBytes: serialized filter (binary)
    """

    def __init__(self, startRow=None, stopRow=None, columns=None, caching=None, maxVersions=1, timeRange=None, filterString=None, batchSize=None, attributes=None, authorizations=None, reversed=None, cacheBlocks=None, colFamTimeRangeMap=None, readType=None, limit=None, consistency=None, targetReplicaId=None, filterBytes=None,):
        self.startRow = startRow
        self.stopRow = stopRow
        self.columns = columns
        self.caching = caching
        self.maxVersions = maxVersions
        self.timeRange = timeRange
        self.filterString = filterString
        self.batchSize = batchSize
        self.attributes = attributes
        self.authorizations = authorizations
        self.reversed = reversed
        self.cacheBlocks = cacheBlocks
        self.colFamTimeRangeMap = colFamTimeRangeMap
        self.readType = readType
        self.limit = limit
        self.consistency = consistency
        self.targetReplicaId = targetReplicaId
        self.filterBytes = filterBytes

    def read(self, iprot):
        """Deserialize this struct from *iprot* (generated Thrift decoder)."""
        # Fast path: accelerated C decoder, when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:  # startRow (binary)
                if ftype == TType.STRING:
                    self.startRow = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 2:  # stopRow (binary)
                if ftype == TType.STRING:
                    self.stopRow = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 3:  # columns (list<TColumn>)
                if ftype == TType.LIST:
                    self.columns = []
                    (_etype97, _size94) = iprot.readListBegin()
                    for _i98 in range(_size94):
                        _elem99 = TColumn()
                        _elem99.read(iprot)
                        self.columns.append(_elem99)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 4:  # caching (i32)
                if ftype == TType.I32:
                    self.caching = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 5:  # maxVersions (i32)
                if ftype == TType.I32:
                    self.maxVersions = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 6:  # timeRange (struct)
                if ftype == TType.STRUCT:
                    self.timeRange = TTimeRange()
                    self.timeRange.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 7:  # filterString (binary)
                if ftype == TType.STRING:
                    self.filterString = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 8:  # batchSize (i32)
                if ftype == TType.I32:
                    self.batchSize = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 9:  # attributes (map<binary, binary>)
                if ftype == TType.MAP:
                    self.attributes = {}
                    (_ktype101, _vtype102, _size100) = iprot.readMapBegin()
                    for _i104 in range(_size100):
                        _key105 = iprot.readBinary()
                        _val106 = iprot.readBinary()
                        self.attributes[_key105] = _val106
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 10:  # authorizations (struct)
                if ftype == TType.STRUCT:
                    self.authorizations = TAuthorization()
                    self.authorizations.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 11:  # reversed (bool)
                if ftype == TType.BOOL:
                    self.reversed = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 12:  # cacheBlocks (bool)
                if ftype == TType.BOOL:
                    self.cacheBlocks = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 13:  # colFamTimeRangeMap (map<binary, TTimeRange>)
                if ftype == TType.MAP:
                    self.colFamTimeRangeMap = {}
                    (_ktype108, _vtype109, _size107) = iprot.readMapBegin()
                    for _i111 in range(_size107):
                        _key112 = iprot.readBinary()
                        _val113 = TTimeRange()
                        _val113.read(iprot)
                        self.colFamTimeRangeMap[_key112] = _val113
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 14:  # readType (i32)
                if ftype == TType.I32:
                    self.readType = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 15:  # limit (i32)
                if ftype == TType.I32:
                    self.limit = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 16:  # consistency (i32)
                if ftype == TType.I32:
                    self.consistency = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 17:  # targetReplicaId (i32)
                if ftype == TType.I32:
                    self.targetReplicaId = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 18:  # filterBytes (binary)
                if ftype == TType.STRING:
                    self.filterBytes = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; only non-None fields are emitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TScan')
        if self.startRow is not None:
            oprot.writeFieldBegin('startRow', TType.STRING, 1)
            oprot.writeBinary(self.startRow)
            oprot.writeFieldEnd()
        if self.stopRow is not None:
            oprot.writeFieldBegin('stopRow', TType.STRING, 2)
            oprot.writeBinary(self.stopRow)
            oprot.writeFieldEnd()
        if self.columns is not None:
            oprot.writeFieldBegin('columns', TType.LIST, 3)
            oprot.writeListBegin(TType.STRUCT, len(self.columns))
            for iter114 in self.columns:
                iter114.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.caching is not None:
            oprot.writeFieldBegin('caching', TType.I32, 4)
            oprot.writeI32(self.caching)
            oprot.writeFieldEnd()
        if self.maxVersions is not None:
            oprot.writeFieldBegin('maxVersions', TType.I32, 5)
            oprot.writeI32(self.maxVersions)
            oprot.writeFieldEnd()
        if self.timeRange is not None:
            oprot.writeFieldBegin('timeRange', TType.STRUCT, 6)
            self.timeRange.write(oprot)
            oprot.writeFieldEnd()
        if self.filterString is not None:
            oprot.writeFieldBegin('filterString', TType.STRING, 7)
            oprot.writeBinary(self.filterString)
            oprot.writeFieldEnd()
        if self.batchSize is not None:
            oprot.writeFieldBegin('batchSize', TType.I32, 8)
            oprot.writeI32(self.batchSize)
            oprot.writeFieldEnd()
        if self.attributes is not None:
            oprot.writeFieldBegin('attributes', TType.MAP, 9)
            oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
            for kiter115, viter116 in self.attributes.items():
                oprot.writeBinary(kiter115)
                oprot.writeBinary(viter116)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        if self.authorizations is not None:
            oprot.writeFieldBegin('authorizations', TType.STRUCT, 10)
            self.authorizations.write(oprot)
            oprot.writeFieldEnd()
        if self.reversed is not None:
            oprot.writeFieldBegin('reversed', TType.BOOL, 11)
            oprot.writeBool(self.reversed)
            oprot.writeFieldEnd()
        if self.cacheBlocks is not None:
            oprot.writeFieldBegin('cacheBlocks', TType.BOOL, 12)
            oprot.writeBool(self.cacheBlocks)
            oprot.writeFieldEnd()
        if self.colFamTimeRangeMap is not None:
            oprot.writeFieldBegin('colFamTimeRangeMap', TType.MAP, 13)
            oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.colFamTimeRangeMap))
            for kiter117, viter118 in self.colFamTimeRangeMap.items():
                oprot.writeBinary(kiter117)
                viter118.write(oprot)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        if self.readType is not None:
            oprot.writeFieldBegin('readType', TType.I32, 14)
            oprot.writeI32(self.readType)
            oprot.writeFieldEnd()
        if self.limit is not None:
            oprot.writeFieldBegin('limit', TType.I32, 15)
            oprot.writeI32(self.limit)
            oprot.writeFieldEnd()
        if self.consistency is not None:
            oprot.writeFieldBegin('consistency', TType.I32, 16)
            oprot.writeI32(self.consistency)
            oprot.writeFieldEnd()
        if self.targetReplicaId is not None:
            oprot.writeFieldBegin('targetReplicaId', TType.I32, 17)
            oprot.writeI32(self.targetReplicaId)
            oprot.writeFieldEnd()
        if self.filterBytes is not None:
            oprot.writeFieldBegin('filterBytes', TType.STRING, 18)
            oprot.writeBinary(self.filterBytes)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """All fields are optional; nothing to check."""
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TMutation(object):
    """
    Atomic mutation for the specified row. It can be either Put or Delete.

    Attributes:
     - put: TPut struct (one-of)
     - deleteSingle: TDelete struct (one-of)
    """

    def __init__(self, put=None, deleteSingle=None,):
        self.put = put
        self.deleteSingle = deleteSingle

    def read(self, iprot):
        """Populate this union by decoding fields from *iprot*."""
        fast = iprot._fast_decode
        if fast is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            fast(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _name, field_type, field_id = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            if field_id == 1 and field_type == TType.STRUCT:
                self.put = TPut()
                self.put.read(iprot)
            elif field_id == 2 and field_type == TType.STRUCT:
                self.deleteSingle = TDelete()
                self.deleteSingle.read(iprot)
            else:
                # Unknown field id or unexpected wire type: discard it.
                iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Encode this union onto *oprot*; unset members are omitted."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TMutation')
        if self.put is not None:
            oprot.writeFieldBegin('put', TType.STRUCT, 1)
            self.put.write(oprot)
            oprot.writeFieldEnd()
        if self.deleteSingle is not None:
            oprot.writeFieldBegin('deleteSingle', TType.STRUCT, 2)
            self.deleteSingle.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """No required fields; always passes."""
        return

    def __repr__(self):
        pairs = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, pairs)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TRowMutations(object):
    """
    A TRowMutations object is used to apply a number of Mutations to a single row.

    Attributes:
     - row: row key (binary); required
     - mutations: list of TMutation; required
    """

    def __init__(self, row=None, mutations=None,):
        self.row = row
        self.mutations = mutations

    def read(self, iprot):
        """Deserialize this struct from *iprot* (generated Thrift decoder)."""
        # Fast path: accelerated C decoder, when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:  # row (binary)
                if ftype == TType.STRING:
                    self.row = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 2:  # mutations (list<TMutation>)
                if ftype == TType.LIST:
                    self.mutations = []
                    (_etype122, _size119) = iprot.readListBegin()
                    for _i123 in range(_size119):
                        _elem124 = TMutation()
                        _elem124.read(iprot)
                        self.mutations.append(_elem124)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; only non-None fields are emitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TRowMutations')
        if self.row is not None:
            oprot.writeFieldBegin('row', TType.STRING, 1)
            oprot.writeBinary(self.row)
            oprot.writeFieldEnd()
        if self.mutations is not None:
            oprot.writeFieldBegin('mutations', TType.LIST, 2)
            oprot.writeListBegin(TType.STRUCT, len(self.mutations))
            for iter125 in self.mutations:
                iter125.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Check that the required fields (row, mutations) are set."""
        if self.row is None:
            raise TProtocolException(message='Required field row is unset!')
        if self.mutations is None:
            raise TProtocolException(message='Required field mutations is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class THRegionInfo(object):
    """
    Thrift-generated struct with region metadata.

    Attributes:
     - regionId: i64; required
     - tableName: binary; required
     - startKey: binary
     - endKey: binary
     - offline: bool
     - split: bool
     - replicaId: i32
    """

    def __init__(self, regionId=None, tableName=None, startKey=None, endKey=None, offline=None, split=None, replicaId=None,):
        self.regionId = regionId
        self.tableName = tableName
        self.startKey = startKey
        self.endKey = endKey
        self.offline = offline
        self.split = split
        self.replicaId = replicaId

    def read(self, iprot):
        """Deserialize this struct from *iprot* (generated Thrift decoder)."""
        # Fast path: accelerated C decoder, when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:  # regionId (i64)
                if ftype == TType.I64:
                    self.regionId = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 2:  # tableName (binary)
                if ftype == TType.STRING:
                    self.tableName = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 3:  # startKey (binary)
                if ftype == TType.STRING:
                    self.startKey = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 4:  # endKey (binary)
                if ftype == TType.STRING:
                    self.endKey = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 5:  # offline (bool)
                if ftype == TType.BOOL:
                    self.offline = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 6:  # split (bool)
                if ftype == TType.BOOL:
                    self.split = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 7:  # replicaId (i32)
                if ftype == TType.I32:
                    self.replicaId = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; only non-None fields are emitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('THRegionInfo')
        if self.regionId is not None:
            oprot.writeFieldBegin('regionId', TType.I64, 1)
            oprot.writeI64(self.regionId)
            oprot.writeFieldEnd()
        if self.tableName is not None:
            oprot.writeFieldBegin('tableName', TType.STRING, 2)
            oprot.writeBinary(self.tableName)
            oprot.writeFieldEnd()
        if self.startKey is not None:
            oprot.writeFieldBegin('startKey', TType.STRING, 3)
            oprot.writeBinary(self.startKey)
            oprot.writeFieldEnd()
        if self.endKey is not None:
            oprot.writeFieldBegin('endKey', TType.STRING, 4)
            oprot.writeBinary(self.endKey)
            oprot.writeFieldEnd()
        if self.offline is not None:
            oprot.writeFieldBegin('offline', TType.BOOL, 5)
            oprot.writeBool(self.offline)
            oprot.writeFieldEnd()
        if self.split is not None:
            oprot.writeFieldBegin('split', TType.BOOL, 6)
            oprot.writeBool(self.split)
            oprot.writeFieldEnd()
        if self.replicaId is not None:
            oprot.writeFieldBegin('replicaId', TType.I32, 7)
            oprot.writeI32(self.replicaId)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Check that the required fields (regionId, tableName) are set."""
        if self.regionId is None:
            raise TProtocolException(message='Required field regionId is unset!')
        if self.tableName is None:
            raise TProtocolException(message='Required field tableName is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TServerName(object):
    """
    Thrift-generated struct identifying a server.

    Attributes:
     - hostName: str; required
     - port: i32
     - startCode: i64
    """

    def __init__(self, hostName=None, port=None, startCode=None,):
        self.hostName = hostName
        self.port = port
        self.startCode = startCode

    def read(self, iprot):
        """Deserialize this struct from *iprot* (generated Thrift decoder)."""
        # Fast path: accelerated C decoder, when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:  # hostName (string; decoded from UTF-8 on Python 2)
                if ftype == TType.STRING:
                    self.hostName = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:  # port (i32)
                if ftype == TType.I32:
                    self.port = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 3:  # startCode (i64)
                if ftype == TType.I64:
                    self.startCode = iprot.readI64()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; only non-None fields are emitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TServerName')
        if self.hostName is not None:
            oprot.writeFieldBegin('hostName', TType.STRING, 1)
            oprot.writeString(self.hostName.encode('utf-8') if sys.version_info[0] == 2 else self.hostName)
            oprot.writeFieldEnd()
        if self.port is not None:
            oprot.writeFieldBegin('port', TType.I32, 2)
            oprot.writeI32(self.port)
            oprot.writeFieldEnd()
        if self.startCode is not None:
            oprot.writeFieldBegin('startCode', TType.I64, 3)
            oprot.writeI64(self.startCode)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Check that the required field (hostName) is set."""
        if self.hostName is None:
            raise TProtocolException(message='Required field hostName is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class THRegionLocation(object):
    """
    Thrift-generated struct pairing a region with the server that hosts it.

    Attributes:
     - serverName: TServerName struct; required
     - regionInfo: THRegionInfo struct; required
    """

    def __init__(self, serverName=None, regionInfo=None,):
        self.serverName = serverName
        self.regionInfo = regionInfo

    def read(self, iprot):
        """Deserialize this struct from *iprot* (generated Thrift decoder)."""
        # Fast path: accelerated C decoder, when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:  # serverName (struct)
                if ftype == TType.STRUCT:
                    self.serverName = TServerName()
                    self.serverName.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:  # regionInfo (struct)
                if ftype == TType.STRUCT:
                    self.regionInfo = THRegionInfo()
                    self.regionInfo.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; only non-None fields are emitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('THRegionLocation')
        if self.serverName is not None:
            oprot.writeFieldBegin('serverName', TType.STRUCT, 1)
            self.serverName.write(oprot)
            oprot.writeFieldEnd()
        if self.regionInfo is not None:
            oprot.writeFieldBegin('regionInfo', TType.STRUCT, 2)
            self.regionInfo.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Check that the required fields (serverName, regionInfo) are set."""
        if self.serverName is None:
            raise TProtocolException(message='Required field serverName is unset!')
        if self.regionInfo is None:
            raise TProtocolException(message='Required field regionInfo is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TTableName(object):
    """
    Thrift wrapper around org.apache.hadoop.hbase.TableName.

    Attributes:
     - ns: namespace name (binary)
     - qualifier: tablename (binary); required
    """

    def __init__(self, ns=None, qualifier=None,):
        self.ns = ns
        self.qualifier = qualifier

    def read(self, iprot):
        """Fill this struct by decoding fields from *iprot*."""
        fast = iprot._fast_decode
        if fast is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            fast(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _name, field_type, field_id = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            if field_id == 1 and field_type == TType.STRING:
                self.ns = iprot.readBinary()
            elif field_id == 2 and field_type == TType.STRING:
                self.qualifier = iprot.readBinary()
            else:
                # Unknown field id or unexpected wire type: discard it.
                iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Encode this struct onto *oprot*; unset fields are omitted."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TTableName')
        for value, field_name, field_id in (
                (self.ns, 'ns', 1),
                (self.qualifier, 'qualifier', 2)):
            if value is not None:
                oprot.writeFieldBegin(field_name, TType.STRING, field_id)
                oprot.writeBinary(value)
                oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise TProtocolException when the required qualifier is missing."""
        if self.qualifier is None:
            raise TProtocolException(message='Required field qualifier is unset!')

    def __repr__(self):
        pairs = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, pairs)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TColumnFamilyDescriptor(object):
"""
Thrift wrapper around
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor
Attributes:
- name
- attributes
- configuration
- blockSize
- bloomnFilterType
- compressionType
- dfsReplication
- dataBlockEncoding
- keepDeletedCells
- maxVersions
- minVersions
- scope
- timeToLive
- blockCacheEnabled
- cacheBloomsOnWrite
- cacheDataOnWrite
- cacheIndexesOnWrite
- compressTags
- evictBlocksOnClose
- inMemory
"""
def __init__(self, name=None, attributes=None, configuration=None, blockSize=None, bloomnFilterType=None, compressionType=None, dfsReplication=None, dataBlockEncoding=None, keepDeletedCells=None, maxVersions=None, minVersions=None, scope=None, timeToLive=None, blockCacheEnabled=None, cacheBloomsOnWrite=None, cacheDataOnWrite=None, cacheIndexesOnWrite=None, compressTags=None, evictBlocksOnClose=None, inMemory=None,):
self.name = name
self.attributes = attributes
self.configuration = configuration
self.blockSize = blockSize
self.bloomnFilterType = bloomnFilterType
self.compressionType = compressionType
self.dfsReplication = dfsReplication
self.dataBlockEncoding = dataBlockEncoding
self.keepDeletedCells = keepDeletedCells
self.maxVersions = maxVersions
self.minVersions = minVersions
self.scope = scope
self.timeToLive = timeToLive
self.blockCacheEnabled = blockCacheEnabled
self.cacheBloomsOnWrite = cacheBloomsOnWrite
self.cacheDataOnWrite = cacheDataOnWrite
self.cacheIndexesOnWrite = cacheIndexesOnWrite
self.compressTags = compressTags
self.evictBlocksOnClose = evictBlocksOnClose
self.inMemory = inMemory
    def read(self, iprot):
        """Deserialize this TColumnFamilyDescriptor from *iprot*.

        Generated code: field ids map to the attributes noted inline; fields
        arriving with an unexpected wire type are skipped.
        """
        # Fast path: accelerated C decoder, when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:  # name (binary)
                if ftype == TType.STRING:
                    self.name = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 2:  # attributes (map<binary, binary>)
                if ftype == TType.MAP:
                    self.attributes = {}
                    (_ktype127, _vtype128, _size126) = iprot.readMapBegin()
                    for _i130 in range(_size126):
                        _key131 = iprot.readBinary()
                        _val132 = iprot.readBinary()
                        self.attributes[_key131] = _val132
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 3:  # configuration (map<string, string>; decoded on Python 2)
                if ftype == TType.MAP:
                    self.configuration = {}
                    (_ktype134, _vtype135, _size133) = iprot.readMapBegin()
                    for _i137 in range(_size133):
                        _key138 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        _val139 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        self.configuration[_key138] = _val139
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 4:  # blockSize (i32)
                if ftype == TType.I32:
                    self.blockSize = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 5:  # bloomnFilterType (i32)
                if ftype == TType.I32:
                    self.bloomnFilterType = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 6:  # compressionType (i32)
                if ftype == TType.I32:
                    self.compressionType = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 7:  # dfsReplication (i16)
                if ftype == TType.I16:
                    self.dfsReplication = iprot.readI16()
                else:
                    iprot.skip(ftype)
            elif fid == 8:  # dataBlockEncoding (i32)
                if ftype == TType.I32:
                    self.dataBlockEncoding = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 9:  # keepDeletedCells (i32)
                if ftype == TType.I32:
                    self.keepDeletedCells = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 10:  # maxVersions (i32)
                if ftype == TType.I32:
                    self.maxVersions = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 11:  # minVersions (i32)
                if ftype == TType.I32:
                    self.minVersions = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 12:  # scope (i32)
                if ftype == TType.I32:
                    self.scope = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 13:  # timeToLive (i32)
                if ftype == TType.I32:
                    self.timeToLive = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 14:  # blockCacheEnabled (bool)
                if ftype == TType.BOOL:
                    self.blockCacheEnabled = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 15:  # cacheBloomsOnWrite (bool)
                if ftype == TType.BOOL:
                    self.cacheBloomsOnWrite = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 16:  # cacheDataOnWrite (bool)
                if ftype == TType.BOOL:
                    self.cacheDataOnWrite = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 17:  # cacheIndexesOnWrite (bool)
                if ftype == TType.BOOL:
                    self.cacheIndexesOnWrite = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 18:  # compressTags (bool)
                if ftype == TType.BOOL:
                    self.compressTags = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 19:  # evictBlocksOnClose (bool)
                if ftype == TType.BOOL:
                    self.evictBlocksOnClose = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 20:  # inMemory (bool)
                if ftype == TType.BOOL:
                    self.inMemory = iprot.readBool()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
def write(self, oprot):
    """Serialize this TColumnFamilyDescriptor to the output protocol *oprot*.

    Uses the C-accelerated encoder when the protocol provides one; otherwise
    emits each non-None field in ascending field-id order.  Field names, ids
    and wire types must stay in sync with thrift_spec — do not reorder.
    """
    # Fast path: hand the whole struct to the native encoder.
    if oprot._fast_encode is not None and self.thrift_spec is not None:
        oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
        return
    oprot.writeStructBegin('TColumnFamilyDescriptor')
    # Optional/None fields are simply omitted from the wire.
    if self.name is not None:
        oprot.writeFieldBegin('name', TType.STRING, 1)
        oprot.writeBinary(self.name)
        oprot.writeFieldEnd()
    if self.attributes is not None:
        oprot.writeFieldBegin('attributes', TType.MAP, 2)
        oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
        for kiter140, viter141 in self.attributes.items():
            oprot.writeBinary(kiter140)
            oprot.writeBinary(viter141)
        oprot.writeMapEnd()
        oprot.writeFieldEnd()
    if self.configuration is not None:
        oprot.writeFieldBegin('configuration', TType.MAP, 3)
        oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.configuration))
        for kiter142, viter143 in self.configuration.items():
            # Text (UTF8) map, unlike the binary 'attributes' map above.
            oprot.writeString(kiter142.encode('utf-8') if sys.version_info[0] == 2 else kiter142)
            oprot.writeString(viter143.encode('utf-8') if sys.version_info[0] == 2 else viter143)
        oprot.writeMapEnd()
        oprot.writeFieldEnd()
    if self.blockSize is not None:
        oprot.writeFieldBegin('blockSize', TType.I32, 4)
        oprot.writeI32(self.blockSize)
        oprot.writeFieldEnd()
    if self.bloomnFilterType is not None:
        # NOTE: 'bloomnFilterType' typo is part of the generated public API.
        oprot.writeFieldBegin('bloomnFilterType', TType.I32, 5)
        oprot.writeI32(self.bloomnFilterType)
        oprot.writeFieldEnd()
    if self.compressionType is not None:
        oprot.writeFieldBegin('compressionType', TType.I32, 6)
        oprot.writeI32(self.compressionType)
        oprot.writeFieldEnd()
    if self.dfsReplication is not None:
        oprot.writeFieldBegin('dfsReplication', TType.I16, 7)
        oprot.writeI16(self.dfsReplication)
        oprot.writeFieldEnd()
    if self.dataBlockEncoding is not None:
        oprot.writeFieldBegin('dataBlockEncoding', TType.I32, 8)
        oprot.writeI32(self.dataBlockEncoding)
        oprot.writeFieldEnd()
    if self.keepDeletedCells is not None:
        oprot.writeFieldBegin('keepDeletedCells', TType.I32, 9)
        oprot.writeI32(self.keepDeletedCells)
        oprot.writeFieldEnd()
    if self.maxVersions is not None:
        oprot.writeFieldBegin('maxVersions', TType.I32, 10)
        oprot.writeI32(self.maxVersions)
        oprot.writeFieldEnd()
    if self.minVersions is not None:
        oprot.writeFieldBegin('minVersions', TType.I32, 11)
        oprot.writeI32(self.minVersions)
        oprot.writeFieldEnd()
    if self.scope is not None:
        oprot.writeFieldBegin('scope', TType.I32, 12)
        oprot.writeI32(self.scope)
        oprot.writeFieldEnd()
    if self.timeToLive is not None:
        oprot.writeFieldBegin('timeToLive', TType.I32, 13)
        oprot.writeI32(self.timeToLive)
        oprot.writeFieldEnd()
    if self.blockCacheEnabled is not None:
        oprot.writeFieldBegin('blockCacheEnabled', TType.BOOL, 14)
        oprot.writeBool(self.blockCacheEnabled)
        oprot.writeFieldEnd()
    if self.cacheBloomsOnWrite is not None:
        oprot.writeFieldBegin('cacheBloomsOnWrite', TType.BOOL, 15)
        oprot.writeBool(self.cacheBloomsOnWrite)
        oprot.writeFieldEnd()
    if self.cacheDataOnWrite is not None:
        oprot.writeFieldBegin('cacheDataOnWrite', TType.BOOL, 16)
        oprot.writeBool(self.cacheDataOnWrite)
        oprot.writeFieldEnd()
    if self.cacheIndexesOnWrite is not None:
        oprot.writeFieldBegin('cacheIndexesOnWrite', TType.BOOL, 17)
        oprot.writeBool(self.cacheIndexesOnWrite)
        oprot.writeFieldEnd()
    if self.compressTags is not None:
        oprot.writeFieldBegin('compressTags', TType.BOOL, 18)
        oprot.writeBool(self.compressTags)
        oprot.writeFieldEnd()
    if self.evictBlocksOnClose is not None:
        oprot.writeFieldBegin('evictBlocksOnClose', TType.BOOL, 19)
        oprot.writeBool(self.evictBlocksOnClose)
        oprot.writeFieldEnd()
    if self.inMemory is not None:
        oprot.writeFieldBegin('inMemory', TType.BOOL, 20)
        oprot.writeBool(self.inMemory)
        oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
def validate(self):
    """Raise TProtocolException if any required field is unset."""
    if self.name is None:
        raise TProtocolException(message='Required field name is unset!')
def __repr__(self):
    """Debug representation listing every attribute of the instance."""
    parts = ['%s=%r' % (attr, value) for attr, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))
def __eq__(self, other):
    """Value equality: same class and identical attribute dictionaries."""
    if not isinstance(other, self.__class__):
        return False
    return self.__dict__ == other.__dict__
def __ne__(self, other):
    """Logical negation of __eq__."""
    equal = (self == other)
    return not equal
class TTableDescriptor(object):
    """
    Thrift wrapper around
    org.apache.hadoop.hbase.client.TableDescriptor

    Attributes:
     - tableName: TTableName struct identifying the table (required)
     - columns: list of TColumnFamilyDescriptor
     - attributes: binary key/value table attributes
     - durability: i32 enum value
    """

    def __init__(self, tableName=None, columns=None, attributes=None, durability=None,):
        self.tableName = tableName
        self.columns = columns
        self.attributes = attributes
        self.durability = durability

    def read(self, iprot):
        """Deserialize this struct from *iprot*.

        Uses the C-accelerated decoder when available; otherwise walks the
        fields one by one.  Unknown field ids are skipped for forward
        compatibility.
        """
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.tableName = TTableName()
                    self.tableName.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    self.columns = []
                    (_etype147, _size144) = iprot.readListBegin()
                    for _i148 in range(_size144):
                        _elem149 = TColumnFamilyDescriptor()
                        _elem149.read(iprot)
                        self.columns.append(_elem149)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.MAP:
                    self.attributes = {}
                    (_ktype151, _vtype152, _size150) = iprot.readMapBegin()
                    for _i154 in range(_size150):
                        _key155 = iprot.readBinary()
                        _val156 = iprot.readBinary()
                        self.attributes[_key155] = _val156
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.I32:
                    self.durability = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TTableDescriptor')
        if self.tableName is not None:
            oprot.writeFieldBegin('tableName', TType.STRUCT, 1)
            self.tableName.write(oprot)
            oprot.writeFieldEnd()
        if self.columns is not None:
            oprot.writeFieldBegin('columns', TType.LIST, 2)
            oprot.writeListBegin(TType.STRUCT, len(self.columns))
            for iter157 in self.columns:
                iter157.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.attributes is not None:
            oprot.writeFieldBegin('attributes', TType.MAP, 3)
            oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
            for kiter158, viter159 in self.attributes.items():
                oprot.writeBinary(kiter158)
                oprot.writeBinary(viter159)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        if self.durability is not None:
            oprot.writeFieldBegin('durability', TType.I32, 4)
            oprot.writeI32(self.durability)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise TProtocolException if any required field is unset."""
        if self.tableName is None:
            raise TProtocolException(message='Required field tableName is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TNamespaceDescriptor(object):
    """
    Thrift wrapper around
    org.apache.hadoop.hbase.NamespaceDescriptor

    Attributes:
     - name: namespace name (required, UTF8 string)
     - configuration: UTF8 string -> string configuration map
    """

    def __init__(self, name=None, configuration=None,):
        self.name = name
        self.configuration = configuration

    def read(self, iprot):
        """Deserialize this struct from *iprot*; unknown fields are skipped."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.MAP:
                    self.configuration = {}
                    (_ktype161, _vtype162, _size160) = iprot.readMapBegin()
                    for _i164 in range(_size160):
                        _key165 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        _val166 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        self.configuration[_key165] = _val166
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TNamespaceDescriptor')
        if self.name is not None:
            oprot.writeFieldBegin('name', TType.STRING, 1)
            oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
            oprot.writeFieldEnd()
        if self.configuration is not None:
            oprot.writeFieldBegin('configuration', TType.MAP, 2)
            oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.configuration))
            for kiter167, viter168 in self.configuration.items():
                oprot.writeString(kiter167.encode('utf-8') if sys.version_info[0] == 2 else kiter167)
                oprot.writeString(viter168.encode('utf-8') if sys.version_info[0] == 2 else viter168)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise TProtocolException if any required field is unset."""
        if self.name is None:
            raise TProtocolException(message='Required field name is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TIOError(TException):
    """
    A TIOError exception signals that an error occurred communicating
    to the HBase master or a HBase region server. Also used to return
    more general HBase error conditions.

    Attributes:
     - message: human-readable description of the server-side error
    """

    def __init__(self, message=None,):
        self.message = message

    def read(self, iprot):
        """Deserialize this exception struct from *iprot*."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.message = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this exception struct to *oprot*."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TIOError')
        if self.message is not None:
            oprot.writeFieldBegin('message', TType.STRING, 1)
            oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __str__(self):
        return repr(self)

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TIllegalArgument(TException):
    """
    A TIllegalArgument exception indicates an illegal or invalid
    argument was passed into a procedure.

    Attributes:
     - message: human-readable description of the invalid argument
    """

    def __init__(self, message=None,):
        self.message = message

    def read(self, iprot):
        """Deserialize this exception struct from *iprot*."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.message = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this exception struct to *oprot*."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TIllegalArgument')
        if self.message is not None:
            oprot.writeFieldBegin('message', TType.STRING, 1)
            oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __str__(self):
        return repr(self)

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# ---------------------------------------------------------------------------
# Generated thrift_spec tables.
# Each entry is (field id, wire type, field name, type args, default value);
# index 0 is a None placeholder because thrift field ids start at 1.  These
# tables drive the C-accelerated (fast) encoder/decoder and must stay in
# sync with the struct classes above — do not edit by hand.
# ---------------------------------------------------------------------------
all_structs.append(TTimeRange)
TTimeRange.thrift_spec = (
    None,  # 0
    (1, TType.I64, 'minStamp', None, None, ),  # 1
    (2, TType.I64, 'maxStamp', None, None, ),  # 2
)
all_structs.append(TColumn)
TColumn.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'family', 'BINARY', None, ),  # 1
    (2, TType.STRING, 'qualifier', 'BINARY', None, ),  # 2
    (3, TType.I64, 'timestamp', None, None, ),  # 3
)
all_structs.append(TColumnValue)
TColumnValue.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'family', 'BINARY', None, ),  # 1
    (2, TType.STRING, 'qualifier', 'BINARY', None, ),  # 2
    (3, TType.STRING, 'value', 'BINARY', None, ),  # 3
    (4, TType.I64, 'timestamp', None, None, ),  # 4
    (5, TType.STRING, 'tags', 'BINARY', None, ),  # 5
    (6, TType.BYTE, 'type', None, None, ),  # 6
)
all_structs.append(TColumnIncrement)
TColumnIncrement.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'family', 'BINARY', None, ),  # 1
    (2, TType.STRING, 'qualifier', 'BINARY', None, ),  # 2
    (3, TType.I64, 'amount', None, 1, ),  # 3
)
all_structs.append(TResult)
TResult.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'row', 'BINARY', None, ),  # 1
    (2, TType.LIST, 'columnValues', (TType.STRUCT, [TColumnValue, None], False), None, ),  # 2
    (3, TType.BOOL, 'stale', None, False, ),  # 3
    (4, TType.BOOL, 'partial', None, False, ),  # 4
)
all_structs.append(TAuthorization)
TAuthorization.thrift_spec = (
    None,  # 0
    (1, TType.LIST, 'labels', (TType.STRING, 'UTF8', False), None, ),  # 1
)
all_structs.append(TCellVisibility)
TCellVisibility.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'expression', 'UTF8', None, ),  # 1
)
all_structs.append(TGet)
TGet.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'row', 'BINARY', None, ),  # 1
    (2, TType.LIST, 'columns', (TType.STRUCT, [TColumn, None], False), None, ),  # 2
    (3, TType.I64, 'timestamp', None, None, ),  # 3
    (4, TType.STRUCT, 'timeRange', [TTimeRange, None], None, ),  # 4
    (5, TType.I32, 'maxVersions', None, None, ),  # 5
    (6, TType.STRING, 'filterString', 'BINARY', None, ),  # 6
    (7, TType.MAP, 'attributes', (TType.STRING, 'BINARY', TType.STRING, 'BINARY', False), None, ),  # 7
    (8, TType.STRUCT, 'authorizations', [TAuthorization, None], None, ),  # 8
    (9, TType.I32, 'consistency', None, None, ),  # 9
    (10, TType.I32, 'targetReplicaId', None, None, ),  # 10
    (11, TType.BOOL, 'cacheBlocks', None, None, ),  # 11
    (12, TType.I32, 'storeLimit', None, None, ),  # 12
    (13, TType.I32, 'storeOffset', None, None, ),  # 13
    (14, TType.BOOL, 'existence_only', None, None, ),  # 14
    (15, TType.STRING, 'filterBytes', 'BINARY', None, ),  # 15
)
all_structs.append(TPut)
TPut.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'row', 'BINARY', None, ),  # 1
    (2, TType.LIST, 'columnValues', (TType.STRUCT, [TColumnValue, None], False), None, ),  # 2
    (3, TType.I64, 'timestamp', None, None, ),  # 3
    None,  # 4 (field id retired in the IDL)
    (5, TType.MAP, 'attributes', (TType.STRING, 'BINARY', TType.STRING, 'BINARY', False), None, ),  # 5
    (6, TType.I32, 'durability', None, None, ),  # 6
    (7, TType.STRUCT, 'cellVisibility', [TCellVisibility, None], None, ),  # 7
)
all_structs.append(TDelete)
TDelete.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'row', 'BINARY', None, ),  # 1
    (2, TType.LIST, 'columns', (TType.STRUCT, [TColumn, None], False), None, ),  # 2
    (3, TType.I64, 'timestamp', None, None, ),  # 3
    (4, TType.I32, 'deleteType', None, 1, ),  # 4
    None,  # 5 (field id retired in the IDL)
    (6, TType.MAP, 'attributes', (TType.STRING, 'BINARY', TType.STRING, 'BINARY', False), None, ),  # 6
    (7, TType.I32, 'durability', None, None, ),  # 7
)
all_structs.append(TIncrement)
TIncrement.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'row', 'BINARY', None, ),  # 1
    (2, TType.LIST, 'columns', (TType.STRUCT, [TColumnIncrement, None], False), None, ),  # 2
    None,  # 3 (field id retired in the IDL)
    (4, TType.MAP, 'attributes', (TType.STRING, 'BINARY', TType.STRING, 'BINARY', False), None, ),  # 4
    (5, TType.I32, 'durability', None, None, ),  # 5
    (6, TType.STRUCT, 'cellVisibility', [TCellVisibility, None], None, ),  # 6
    (7, TType.BOOL, 'returnResults', None, None, ),  # 7
)
all_structs.append(TAppend)
TAppend.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'row', 'BINARY', None, ),  # 1
    (2, TType.LIST, 'columns', (TType.STRUCT, [TColumnValue, None], False), None, ),  # 2
    (3, TType.MAP, 'attributes', (TType.STRING, 'BINARY', TType.STRING, 'BINARY', False), None, ),  # 3
    (4, TType.I32, 'durability', None, None, ),  # 4
    (5, TType.STRUCT, 'cellVisibility', [TCellVisibility, None], None, ),  # 5
    (6, TType.BOOL, 'returnResults', None, None, ),  # 6
)
all_structs.append(TScan)
TScan.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'startRow', 'BINARY', None, ),  # 1
    (2, TType.STRING, 'stopRow', 'BINARY', None, ),  # 2
    (3, TType.LIST, 'columns', (TType.STRUCT, [TColumn, None], False), None, ),  # 3
    (4, TType.I32, 'caching', None, None, ),  # 4
    (5, TType.I32, 'maxVersions', None, 1, ),  # 5
    (6, TType.STRUCT, 'timeRange', [TTimeRange, None], None, ),  # 6
    (7, TType.STRING, 'filterString', 'BINARY', None, ),  # 7
    (8, TType.I32, 'batchSize', None, None, ),  # 8
    (9, TType.MAP, 'attributes', (TType.STRING, 'BINARY', TType.STRING, 'BINARY', False), None, ),  # 9
    (10, TType.STRUCT, 'authorizations', [TAuthorization, None], None, ),  # 10
    (11, TType.BOOL, 'reversed', None, None, ),  # 11
    (12, TType.BOOL, 'cacheBlocks', None, None, ),  # 12
    (13, TType.MAP, 'colFamTimeRangeMap', (TType.STRING, 'BINARY', TType.STRUCT, [TTimeRange, None], False), None, ),  # 13
    (14, TType.I32, 'readType', None, None, ),  # 14
    (15, TType.I32, 'limit', None, None, ),  # 15
    (16, TType.I32, 'consistency', None, None, ),  # 16
    (17, TType.I32, 'targetReplicaId', None, None, ),  # 17
    (18, TType.STRING, 'filterBytes', 'BINARY', None, ),  # 18
)
all_structs.append(TMutation)
TMutation.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'put', [TPut, None], None, ),  # 1
    (2, TType.STRUCT, 'deleteSingle', [TDelete, None], None, ),  # 2
)
all_structs.append(TRowMutations)
TRowMutations.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'row', 'BINARY', None, ),  # 1
    (2, TType.LIST, 'mutations', (TType.STRUCT, [TMutation, None], False), None, ),  # 2
)
all_structs.append(THRegionInfo)
THRegionInfo.thrift_spec = (
    None,  # 0
    (1, TType.I64, 'regionId', None, None, ),  # 1
    (2, TType.STRING, 'tableName', 'BINARY', None, ),  # 2
    (3, TType.STRING, 'startKey', 'BINARY', None, ),  # 3
    (4, TType.STRING, 'endKey', 'BINARY', None, ),  # 4
    (5, TType.BOOL, 'offline', None, None, ),  # 5
    (6, TType.BOOL, 'split', None, None, ),  # 6
    (7, TType.I32, 'replicaId', None, None, ),  # 7
)
all_structs.append(TServerName)
TServerName.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'hostName', 'UTF8', None, ),  # 1
    (2, TType.I32, 'port', None, None, ),  # 2
    (3, TType.I64, 'startCode', None, None, ),  # 3
)
all_structs.append(THRegionLocation)
THRegionLocation.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'serverName', [TServerName, None], None, ),  # 1
    (2, TType.STRUCT, 'regionInfo', [THRegionInfo, None], None, ),  # 2
)
all_structs.append(TTableName)
TTableName.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'ns', 'BINARY', None, ),  # 1
    (2, TType.STRING, 'qualifier', 'BINARY', None, ),  # 2
)
all_structs.append(TColumnFamilyDescriptor)
TColumnFamilyDescriptor.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'name', 'BINARY', None, ),  # 1
    (2, TType.MAP, 'attributes', (TType.STRING, 'BINARY', TType.STRING, 'BINARY', False), None, ),  # 2
    (3, TType.MAP, 'configuration', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ),  # 3
    (4, TType.I32, 'blockSize', None, None, ),  # 4
    (5, TType.I32, 'bloomnFilterType', None, None, ),  # 5
    (6, TType.I32, 'compressionType', None, None, ),  # 6
    (7, TType.I16, 'dfsReplication', None, None, ),  # 7
    (8, TType.I32, 'dataBlockEncoding', None, None, ),  # 8
    (9, TType.I32, 'keepDeletedCells', None, None, ),  # 9
    (10, TType.I32, 'maxVersions', None, None, ),  # 10
    (11, TType.I32, 'minVersions', None, None, ),  # 11
    (12, TType.I32, 'scope', None, None, ),  # 12
    (13, TType.I32, 'timeToLive', None, None, ),  # 13
    (14, TType.BOOL, 'blockCacheEnabled', None, None, ),  # 14
    (15, TType.BOOL, 'cacheBloomsOnWrite', None, None, ),  # 15
    (16, TType.BOOL, 'cacheDataOnWrite', None, None, ),  # 16
    (17, TType.BOOL, 'cacheIndexesOnWrite', None, None, ),  # 17
    (18, TType.BOOL, 'compressTags', None, None, ),  # 18
    (19, TType.BOOL, 'evictBlocksOnClose', None, None, ),  # 19
    (20, TType.BOOL, 'inMemory', None, None, ),  # 20
)
all_structs.append(TTableDescriptor)
TTableDescriptor.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'tableName', [TTableName, None], None, ),  # 1
    (2, TType.LIST, 'columns', (TType.STRUCT, [TColumnFamilyDescriptor, None], False), None, ),  # 2
    (3, TType.MAP, 'attributes', (TType.STRING, 'BINARY', TType.STRING, 'BINARY', False), None, ),  # 3
    (4, TType.I32, 'durability', None, None, ),  # 4
)
all_structs.append(TNamespaceDescriptor)
TNamespaceDescriptor.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'name', 'UTF8', None, ),  # 1
    (2, TType.MAP, 'configuration', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ),  # 2
)
all_structs.append(TIOError)
TIOError.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'message', 'UTF8', None, ),  # 1
)
all_structs.append(TIllegalArgument)
TIllegalArgument.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'message', 'UTF8', None, ),  # 1
)
# Patch the specs for the fast codec, then drop the registration list.
fix_spec(all_structs)
del all_structs
import json
import math
import datetime
import numpy as np
import pandas as pd
from pyutils.time import dates
from pyutils.tool.list import get_flat_list
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
class FeaturesEncode(object):
    """Feature-engineering helpers operating on pandas DataFrames.

    Covers missing-value handling, scaling, discretization, time-feature
    extraction, outlier truncation, categorical/vector expansion, feature
    crossing and feature-id map generation.

    NOTE(review): several methods call ``parallel_apply``, which requires
    pandarallel to have been initialised by the caller before use.
    """

    def __init__(self):
        pass

    def get_df_cols(self, df: pd.DataFrame):
        """Return the DataFrame's column names as a set (fast membership tests)."""
        return set(df.columns.tolist())

    def get_nan_transform(self, df: pd.DataFrame, cols: list, type="category", value=0, method="default"):
        """Fill missing values in *cols*.

        :param df: DataFrame
        :param cols: columns to transform (must all exist in *df*)
        :param type: value kind -- "category", "numerical" or "time"
        :param value: fill value used after casting
        :param method: "default" (constant fill) or "linear" (interpolation,
                       numerical columns only)
        :return: the transformed DataFrame
        :raises ValueError: on unknown columns, type or method
        """
        if not set(cols).issubset(self.get_df_cols(df)):
            raise ValueError("nan transform field input not found in DataFrame!!!")
        if method == "default":
            if type == "category":
                type, value = "object", str(value)
            elif type == "numerical":
                type, value = "float32", float(value)
            elif type == "time":
                type, value = "object", "1970-01-01 00:00:00"
            else:
                raise ValueError("nan transform type input %s is not support!!!" % type)
        elif method == "linear":
            if type == "numerical":
                type, value = "float32", float(value)
                for f in cols:
                    df[f] = df[f].interpolate(method='linear', axis=0)
            else:
                raise ValueError("nan transform only support numerical when method is linear!!!")
        else:
            # was: misleading copy-pasted "scaler transform ... not found" message
            raise ValueError("nan transform method input %s is not support!!!" % str(method))
        df = df.astype({x: type for x in cols})
        df.fillna({x: value for x in cols}, inplace=True)
        return df

    def get_scaler_transform(self, df: pd.DataFrame, field, is_log=False, base=math.exp(-1), method="normalize"):
        """Scale a numerical column in place.

        :param df: DataFrame
        :param field: column to scale
        :param is_log: apply log(max(x, base)) first (needs pandarallel)
        :param base: lower clamp applied before the log
        :param method: "normalize" (min-max) or "standard" (z-score)
        :return: the transformed DataFrame
        """
        if field not in self.get_df_cols(df):
            raise ValueError("scaler transform field input %s not found in DataFrame!!!" % str(field))
        if is_log:
            df[field] = df[field].parallel_apply(lambda x: math.log(max(x, base)))
        column = df[field].values.astype(np.float64).reshape(-1, 1)
        if "normalize" == method:
            df[field] = MinMaxScaler().fit_transform(column).astype(np.float64).reshape(-1)
        elif "standard" == method:
            df[field] = StandardScaler().fit_transform(column).astype(np.float64).reshape(-1)
        else:
            raise ValueError("scaler transform method %s input error!!!" % str(method))
        return df

    def get_discrete_transform(self, df: pd.DataFrame, field, block=10):
        """Discretize a continuous column into *block* buckets (as strings).

        :param df: DataFrame
        :param field: column to discretize (assumed already scaled to [0, 1] --
                      TODO confirm with callers)
        :param block: number of buckets
        :return: the transformed DataFrame
        """
        if field not in self.get_df_cols(df):
            raise ValueError("discrete transform field input %s not found in DataFrame!!!" % str(field))
        df[field] = df[field].multiply(block).map(int).map(str)
        return df

    def get_time2category_transform(self, df: pd.DataFrame, field, method="timestamp", dim="day"):
        """Convert a datetime-string column into a numeric time feature.

        :param df: DataFrame
        :param field: column holding "%Y-%m-%d %H:%M:%S"-style strings
        :param method: "timestamp" (absolute value) or "timediff" (delta to now)
        :param dim: granularity -- "sec", "day" or "month"
        :return: the transformed DataFrame
        """
        if field not in self.get_df_cols(df):
            raise ValueError("time transform field input %s not found in DataFrame!!!" % str(field))
        if method == "timestamp":
            if dim == "sec":
                df[field] = df[field].parallel_apply(lambda x: dates.str_to_timestamp(x))
            elif dim == "day":
                df[field] = df[field].parallel_apply(lambda x: dates.get_date_from_str(x).day)
            elif dim == "month":
                df[field] = df[field].parallel_apply(lambda x: dates.get_date_from_str(x).month)
            else:
                raise ValueError("time transform dim input %s error!!!" % str(dim))
        elif method == "timediff":
            now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            if dim == "sec":
                df[field] = df[field].parallel_apply(lambda x: dates.get_date_diff(start_date=str(x),
                                                                                   end_date=now_time).seconds)
            elif dim == "day":
                df[field] = df[field].parallel_apply(lambda x: dates.get_date_diff(start_date=str(x),
                                                                                   end_date=now_time).days)
            elif dim == "month":
                df[field] = df[field].parallel_apply(lambda x: int(dates.get_date_diff(start_date=str(x),
                                                                                       end_date=now_time).days / 30.0))
            else:
                # was: reported the (valid) method instead of the invalid dim
                raise ValueError("time transform dim input %s error!!!" % str(dim))
        else:
            raise ValueError("time2category transform method input error!!!")
        return df

    def get_singular_truncation_transform(self, df: pd.DataFrame, field, thr=None, method="min"):
        """Clamp outliers in a numerical column against threshold *thr*.

        :param df: DataFrame
        :param field: column to clamp
        :param thr: threshold; when falsy the DataFrame is returned unchanged
                    (NOTE(review): thr=0 is treated as "no threshold" -- confirm
                    this is intended before relying on a zero clamp)
        :param method: "min" caps values at thr, anything else floors at thr
        :return: the transformed DataFrame
        """
        if field not in self.get_df_cols(df):
            raise ValueError("singular transform field input %s not found in DataFrame!!!" % str(field))
        if not thr:
            return df
        if method == "min":
            df[field] = df[field].parallel_apply(lambda x: min(x, thr))
        else:
            df[field] = df[field].parallel_apply(lambda x: max(x, thr))
        return df

    def get_category_type_transform(self, df: pd.DataFrame, field, is_int: bool = False):
        """Cast a column to pandas "category" dtype (floats go through int->str).

        :param df: DataFrame
        :param field: column to cast
        :param is_int: force the int->str conversion even for non-float dtypes
                       (was declared as a bare annotation ``is_int: False``,
                       which made the parameter required; now a real default)
        :return: the transformed DataFrame
        """
        if field not in self.get_df_cols(df):
            raise ValueError("category type transform field input %s not found in DataFrame!!!" % str(field))
        if df[field].dtype == 'float64' or df[field].dtype == 'float32' or is_int:
            df[field] = df[field].map(int).map(str)
        df[field] = df[field].astype("category")
        return df

    def get_json2category_transform(self, df: pd.DataFrame, cols: list):
        """Reduce JSON-object columns to a '|'-joined string of their keys.

        Unparseable / non-object values collapse to the empty string.

        :param df: DataFrame
        :param cols: JSON columns to transform
        :return: the transformed DataFrame
        """
        def keys_of(raw):
            try:
                return "|".join(json.loads(raw).keys())
            except Exception:  # narrowed from bare except: keep KeyboardInterrupt etc.
                return ""

        if not cols:
            return df
        for k in cols:
            df[k] = df.parallel_apply(lambda x: keys_of(x[k]), axis=1)
        return df

    def get_vector2category_transform(self, df: pd.DataFrame, cols: list, is_drop=True):
        """Expand '|'-separated multi-value columns into dummy columns.

        (Marked as temporarily deprecated by the original author.)

        :param df: DataFrame
        :param cols: multi-value columns to expand
        :param is_drop: drop the original columns after expansion
        :return: (expanded DataFrame, list of generated column-name lists)
        """
        drop_features = []
        category_features_gen = []
        if not cols:
            # was: returned a bare [] here, breaking the (df, list) contract
            # honoured by the normal path and by get_vector2value_transform
            return df, category_features_gen
        df_vector = df
        for v in cols:
            if v in self.get_df_cols(df):
                dfTmp = df_vector[v].str.get_dummies(sep='|')
                dfTmp.rename(columns=lambda x: v + "|" + x, inplace=True)
                df_vector = df_vector.join(dfTmp)
                drop_features.append(v)
                category_features_gen.append(dfTmp.columns.tolist())
                del dfTmp
        if is_drop:
            df_vector.drop(drop_features, axis=1, inplace=True)
        return df_vector, category_features_gen

    def get_vector2value_transform(self, df: pd.DataFrame, cols: list, is_drop=True):
        """Split ','-separated vector columns into one column per component.

        :param df: DataFrame
        :param cols: vector columns to split
        :param is_drop: drop the original columns after splitting
        :return: (expanded DataFrame, list of generated column-name lists)
        """
        drop_features = []
        value_features_gen = []
        if not cols:
            return df, value_features_gen
        df_vector = df
        for v in cols:
            if v in self.get_df_cols(df):
                val = np.array([str.split(x, ",") for x in df[v].values])
                # NOTE(review): this string index differs from df's default
                # integer index, so the join below aligns on nothing -- verify
                # against callers before relying on the joined values.
                index = [v + ":" + str(i) for i in range(1, len(val) + 1)]
                dfTmp = pd.DataFrame(val, index=index)
                df_vector = df_vector.join(dfTmp)
                drop_features.append(v)
                value_features_gen.append(dfTmp.columns.tolist())
                del dfTmp
        if is_drop:
            df_vector.drop(drop_features, axis=1, inplace=True)
        return df_vector, value_features_gen

    def get_cross_transform(self, df: pd.DataFrame, field1, field2, vector_col=None):
        """Build the crossed feature column ``field1*field2``.

        '|'-separated multi-value columns (listed in *vector_col*) are crossed
        element-wise; plain columns are crossed value-by-value.

        :param df: DataFrame
        :param field1: first column
        :param field2: second column
        :param vector_col: columns to treat as multi-value (None means none;
                           previously ``set(None)`` raised TypeError)
        :return: the DataFrame with the new crossed column appended
        """
        def cross_one(row, multi_col, plain_col):
            # Cross every element of the multi-value column with the scalar.
            return '|'.join(['{}*{}'.format(x, row[plain_col]) for x in str.split(row[multi_col], "|")])

        def cross_both(row, col1, col2):
            # Cartesian product of the two multi-value columns.
            return '|'.join(['{}*{}'.format(x, y) for x in str.split(row[col1], "|")
                             for y in str.split(row[col2], "|")])

        if field1 not in self.get_df_cols(df) or field2 not in self.get_df_cols(df):
            raise ValueError("cross transform field input %s or %s not found in DataFrame!!!" % (field1, field2))
        vector_set = set(vector_col or [])
        new_field = '{}*{}'.format(field1, field2)
        if field1 in vector_set and field2 in vector_set:
            df[new_field] = df.parallel_apply(lambda x: cross_both(x, field1, field2), axis=1)
        elif field1 in vector_set:
            df[new_field] = df.parallel_apply(lambda x: cross_one(x, field1, field2), axis=1)
        elif field2 in vector_set:
            df[new_field] = df.parallel_apply(lambda x: cross_one(x, field2, field1), axis=1)
        else:
            df[new_field] = df[field1].astype("object").map(str) + \
                            "*" + \
                            df[field2].astype("object").map(str)
        df[new_field] = df[new_field].astype("category")
        return df

    def gen_features_id_map(self, df: pd.DataFrame, feature_id: dict, vector, label="label"):
        """Extend *feature_id* with ids for every feature value seen in *df*.

        :param df: DataFrame of features (must contain the *label* column)
        :param feature_id: existing "col:value" -> int mapping, updated in place
        :param vector: columns whose values are '|'-separated multi-values
        :param label: label column name, excluded from the mapping
        :return: the updated feature_id mapping
        """
        cols = self.get_df_cols(df)
        cols.remove(label)
        feature_id_list = []
        for c in cols:
            if c in vector:
                feature_id_list.extend(["{}:{}".format(c, x) for d in df[c]
                                        for x in set(get_flat_list([str.split(d, "|")]))])
            else:
                feature_id_list.extend(["{}:{}".format(c, x) for x in list(df[c].drop_duplicates())])
        # Assign fresh consecutive ids after the current maximum.
        max_len = max(feature_id.values()) if feature_id else 0
        for x in feature_id_list:
            if not feature_id.get(x, None):
                max_len += 1
                feature_id[x] = max_len
        return feature_id
import re
# Regex fragment matching an optional parameter-type suffix (e.g. ":int");
# '%s' is filled with the (regex-safe) param_separator character.
RSR_TYPE_PATTERN = '(%s[a-zA-Z0-9]*)?'
class InvalidParameterError(Exception):
    """Raised to signal an encounter with a syntactically invalid parameter."""
class RouteParameterizationIrreversibleError(Exception):
    """Raised to signal an error while attempting to reverse a route due
    an unsupplied required parameter."""
class RSRReverser(object):
"""A Rails-style route reverser.
Attributes:
option_bounds (str): The characters signaling the beginning and end
of a route option.
param_bounds (str): The characters signaling the beginning and end
of a route parameter.
param_separator (str): The character signaling the separator between
the parameter's name and type.
NOTE:
The option_bounds, param_bounds, and param_separator must all be
regex-safe strings.
"""
option_bounds = '[]'
param_bounds = '{}'
param_separator = ':'
def __init__(self, route, option_bounds=None, param_bounds=None,
             param_separator=None):
    """Constructs a new RSRReverser.

    Args:
        route (str): The Rails-style route to reverse.
        option_bounds (str): @see RSRReverser::option_bounds.
        param_bounds (str): @see RSRReverser::param_bounds.
        param_separator (str): @see RSRReverser::param_separator.
    """
    self._route = route
    self.option_bounds = self.pick('option_bounds', option_bounds)
    self.param_bounds = self.pick('param_bounds', param_bounds)
    self.param_separator = self.pick('param_separator', param_separator)
    # Pre-compute the parameter regex once; it only depends on the
    # separator/bounds chosen above.
    self._param_pattern = self.extrapolate_param_pattern()
def pick(self, attr, val):
"""Gets the matching class attribute from RSRReverser if val is None.
Args:
attr (str): The name of the attribute to select from RSRReverser.
val (var): Any value.
Returns (var):
If val is None, returns the matching class attribute from
RSRReverser. Otherwise, returns val.
"""
val = val if val else getattr(RSRReverser, attr)
return val
def set_route(self, route):
"""Sets THIS RSRReverser's route.
Args:
route (str): A Rails-style route.
"""
self._route = route
def get_route(self):
"""Gets THIS RSRReverser's route.
Returns (str):
THIS RSRReverser's route.
"""
return self._route
def extrapolate_param_pattern(self):
"""Extrapolates the regular expression to be used to match parameters
based on THIS RSRReverser's :param_separator: and :param_bounds:.
Returns (str):
The regular expression to be used to match parameters.
"""
type_pattern = RSR_TYPE_PATTERN % self.param_separator
param_pattern = '%s%%s%s%s' % (self.param_bounds[0],
type_pattern,
self.param_bounds[1])
return param_pattern
def get_option_start(self, route=None):
"""Gets the starting position of the FIRST option in the :route:.
Args:
route (str|None): The route to search--or None to search THIS
RSRReverser's route.
Returns (int):
The starting position of the FIRST option or -1 if none exists.
"""
route = route if route else self.get_route()
start = route.find(self.option_bounds[0])
end = route.find(self.option_bounds[1])
if end == -1:
return -1
if end < start:
return -1
return start
def get_option_end(self, route=None):
"""Gets the ending position of the FIRST option in the :route:.
Args:
route (str|None): The route to search--or None to search THIS
RSRReverser's route.
Returns (int):
The ending position of the FIRST option or -1 if none exists.
"""
route = route if route else self.get_route()
start = self.get_option_start(route)
if start == -1:
return -1
pos = start
opts = 1
while opts > 0:
pos += 1
if pos >= len(route):
return -1
char = route[pos]
if char not in self.option_bounds:
continue
if char == self.option_bounds[0]:
opts += 1
if char == self.option_bounds[1]:
opts -= 1
return pos
def get_option(self, route=None):
"""Gets the FIRST option in the :route:.
Args:
route (str|None): The route to search--or None to search THIS
RSRReverser's route.
Returns (str):
The first option in the :route: or '' if none exists.
example:
:route: '/eg[/{option1}]/sep[/{option2}]' -> '[/{option1}]'
:route: '/eg/no/options' -> ''
"""
route = route if route else self.get_route()
op_start = self.get_option_start(route)
op_end = self.get_option_end(route)
if op_start == -1 or op_end == -1:
return ''
if op_end < op_start:
return ''
return route[op_start:op_end + 1]
def clean_parameter(self, parameter):
"""Ensures that the :parameter: is syntactically valid and returns its
name.
Args:
parameter (str|None): A route parameter string.
Returns (str):
The :parameter:'s name or '' if the :parameter: is syntactically
invalid.
example:
:parameter: '{param}' -> 'param'
:parameter: '{param:digits}' -> 'param'
:parameter: '{param:invalid:type}' -> ''
"""
parts = parameter.split(self.param_separator)
if len(parts) == 1:
pass
elif len(parts) == 2:
parameter = parts[0]
else:
raise InvalidParameterError
if parameter == '':
raise InvalidParameterError
return parameter
def substitute_parameters(self, parameters, route=None):
"""Substitutes parameter values in place of parameter keys.
Args:
parameters (dict): A dictionary of parameter names / keys
and values.
route (str|None): The route to substitute values in place of keys
or None to use THIS RSRReverser's route.
Returns (str):
The route--with the parameter keys substituted by the parameter
values.
example:
:parameters: {
'p1': 'examples',
'p2': 'are',
'p3': 'useful',
}
:route: '/eg/{p1}/{p3}'
-> '/eg/examples/useful'
"""
substituted_route = route if route else self.get_route()
for param, value in parameters.iteritems():
pattern = self._param_pattern % re.escape(param)
matches = re.finditer(pattern, substituted_route)
for match in matches:
substituted_route = substituted_route.replace(match.group(),
value)
return substituted_route
def prune_options(self, parameters):
"""Prunes any options that cannot be replaced due to unsupplied
parameters.
Args:
parameters (dict): A dictionary of parameter names / keys
and values.
Returns:
THIS RSRReverer's route--with any unsubstitutable options pruned.
example:
:self.route: '/eg[/{o1}[/{o2}]]/s1[/{o3}[/{o4}]]/s2[/{o5}]'
:parameters: {
'o1': 'nest_1',
'o2': 'replaced',
'o4': 'nest_2_not',
'aside': 'neither_is_o5',
}
-> '/eg/{op1}/{op2}/s1/s2'
"""
route = self.get_route()
while True:
option = self.get_option(route)
if option == '':
return route
option_reverser = RSRReverser(option[1:-1],
option_bounds=self.option_bounds,
param_bounds=self.param_bounds,
param_separator=self.param_separator)
pruned_route = option_reverser.prune_options(parameters)
substituted_route = option_reverser.substitute_parameters(
parameters,
pruned_route)
if self.is_reversed(substituted_route):
route = route.replace(option, option[1:-1])
else:
route = route.replace(option, '')
def is_reversed(self, route=None):
"""Determines whether a :route: is reversed.
Args:
route (str|None): The route to substitute values in place of keys
or None to use THIS RSRReverser's route.
Returns (bool):
Whehter or not the route is reversed.
"""
route = route if route else self.get_route()
if route.find(self.option_bounds[0]) != -1:
return False
if route.find(self.option_bounds[1]) != -1:
return False
if route.find(self.param_bounds[0]) != -1:
return False
if route.find(self.param_bounds[1]) != -1:
return False
return True
def reverse(self, parameters):
"""Reverses a Rails-style route.
Args:
parameters (dict): A dictionary of parameter names / keys
and values.
Returns (str):
THIS RSRReverser's route--reversed given the :parameters:.
example:
:self.route: '/eg[/{o1}[/{o2}]]/s1[/{o3}[/{o4}]]/s2/{p1}'
:parameters: {
'o1': 'nest_1',
'o2': 'replaced',
'o4': 'nest_2_not',
'p1': 'param_must_be_supplied',
}
-> '/eg/nest_1/replaced/s1/s2/param_must_be_supplied'
:self.route: '/eg/{p1}'
:parameters: {
'aside': 'this_fials_param_must_be_supplied',
}
-> raises RouteParameterizationIrreversibleError
"""
pruned_route = self.prune_options(parameters)
reversed_route = self.substitute_parameters(parameters, pruned_route)
if not self.is_reversed(reversed_route):
raise RouteParameterizationIrreversibleError
return reversed_route | /rsr-reverse-0.1.1.tar.gz/rsr-reverse-0.1.1/rsr_reverse/reverser.py | 0.853425 | 0.430207 | reverser.py | pypi |
rsrc
====
[](https://travis-ci.com/lycantropos/rsrc "Travis CI")
[](https://dev.azure.com/lycantropos/rsrc/_build/latest?definitionId=4&branchName=master "Azure Pipelines")
[](https://codecov.io/gh/lycantropos/rsrc "Codecov")
[](https://github.com/lycantropos/rsrc/blob/master/LICENSE "License")
[](https://badge.fury.io/py/rsrc "PyPI")
In what follows
- `python` is an alias for `python3.5` or any later
version (`python3.6` and so on),
- `pypy` is an alias for `pypy3.5` or any later
version (`pypy3.6` and so on).
Installation
------------
Install the latest `pip` & `setuptools` packages versions:
- with `CPython`
```bash
python -m pip install --upgrade pip setuptools
```
- with `PyPy`
```bash
pypy -m pip install --upgrade pip setuptools
```
### User
Download and install the latest stable version from `PyPI` repository:
- with `CPython`
```bash
python -m pip install --upgrade rsrc
```
- with `PyPy`
```bash
pypy -m pip install --upgrade rsrc
```
### Developer
Download the latest version from `GitHub` repository
```bash
git clone https://github.com/lycantropos/rsrc.git
cd rsrc
```
Install dependencies:
- with `CPython`
```bash
python -m pip install --force-reinstall -r requirements.txt
```
- with `PyPy`
```bash
pypy -m pip install --force-reinstall -r requirements.txt
```
Install:
- with `CPython`
```bash
python setup.py install
```
- with `PyPy`
```bash
pypy setup.py install
```
Usage
-----
The main idea is to use `setuptools` feature
called ["Dynamic Discovery of Services and Plugins"](https://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins).
Assuming we have a package `rsrc_ftp` with structure
|_ rsrc_ftp.py
|_ setup.py
which adds support for URLs with `ftp` scheme
`rsrc_ftp.py`
```python
from rsrc.models import Resource
def deserialize(string: str) -> Resource:
...
```
to make it available for `rsrc` package
we should register its entry point
(`rsrc_ftp::deserialize` function in our case)
`setup.py`
```python
from setuptools import setup
from rsrc import plugins
plugins_entry_points = [
plugins.to_entry_point(id_=plugins.to_id('ftp'),
module_name='rsrc_ftp',
function_name='deserialize'),
]
setup(name='rsrc_ftp',
py_modules=['rsrc_ftp'],
entry_points={plugins.__name__: plugins_entry_points},
install_requires=['rsrc'])
```
After that the installation of `rsrc_ftp` package
will register `rsrc_ftp::deserialize` function in `rsrc` package
as an entry point for resources with `ftp` scheme
```python
>>> from rsrc.base import deserialize
>>> ftp_resource = deserialize('ftp://path/to/resource')
>>> ftp_resource.url
URL('ftp', 'path', '/to/resource', '', '', '')
```
Plugins
-------
- [`rsrc_local`](https://pypi.org/project/rsrc_local) -- adds support for local/local network resources.
- [`rsrc_web`](https://pypi.org/project/rsrc_web) -- adds support for web resources (both `http` & `https` schemes).
Development
-----------
### Bumping version
#### Preparation
Install
[bump2version](https://github.com/c4urself/bump2version#installation).
#### Pre-release
Choose which version number category to bump following [semver
specification](http://semver.org/).
Test bumping version
```bash
bump2version --dry-run --verbose $CATEGORY
```
where `$CATEGORY` is the target version number category name, possible
values are `patch`/`minor`/`major`.
Bump version
```bash
bump2version --verbose $CATEGORY
```
This will set version to `major.minor.patch-alpha`.
#### Release
Test bumping version
```bash
bump2version --dry-run --verbose release
```
Bump version
```bash
bump2version --verbose release
```
This will set version to `major.minor.patch`.
#### Notes
To avoid inconsistency between branches and pull requests,
bumping version should be merged into `master` branch
as separate pull request.
### Running tests
Install dependencies:
- with `CPython`
```bash
python -m pip install --force-reinstall -r requirements-tests.txt
```
- with `PyPy`
```bash
pypy -m pip install --force-reinstall -r requirements-tests.txt
```
Plain
```bash
pytest
```
Inside `Docker` container:
- with `CPython`
```bash
docker-compose --file docker-compose.cpython.yml up
```
- with `PyPy`
```bash
docker-compose --file docker-compose.pypy.yml up
```
`Bash` script (e.g. can be used in `Git` hooks):
- with `CPython`
```bash
./run-tests.sh
```
or
```bash
./run-tests.sh cpython
```
- with `PyPy`
```bash
./run-tests.sh pypy
```
`PowerShell` script (e.g. can be used in `Git` hooks):
- with `CPython`
```powershell
.\run-tests.ps1
```
or
```powershell
.\run-tests.ps1 cpython
```
- with `PyPy`
```powershell
.\run-tests.ps1 pypy
```
| /rsrc-0.1.3.tar.gz/rsrc-0.1.3/README.md | 0.853104 | 0.925903 | README.md | pypi |
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data (list of floats) the data set used for the calculations
            (assumed to be populated by the Distribution base class --
            confirm against its source)
    """

    def __init__(self, mu=0, sigma=1):
        """Initialize with mean ``mu`` and standard deviation ``sigma``."""
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Side effect: stores the result on ``self.mean``.

        Args:
            None

        Returns:
            float: mean of the data set

        Raises:
            ZeroDivisionError: if ``self.data`` is empty.
        """
        self.mean = 1.0 * sum(self.data) / len(self.data)
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Side effect: recomputes ``self.mean`` and stores the result on
        ``self.stdev``.

        Args:
            sample (bool): whether the data represents a sample (divides by
                n - 1, Bessel's correction) or a population (divides by n)

        Returns:
            float: standard deviation of the data set
        """
        n = len(self.data) - 1 if sample else len(self.data)
        mean = self.calculate_mean()
        squared_deviations = sum((d - mean) ** 2 for d in self.data)
        self.stdev = math.sqrt(squared_deviations / n)
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the Gaussian
        distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function evaluated at ``x``
        """
        return (1.0 / (self.stdev * math.sqrt(2 * math.pi))) * math.exp(
            -0.5 * ((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces=50):
        """Function to plot the normalized histogram of the data and a plot
        of the probability density function along the same range.

        Args:
            n_spaces (int): number of points at which to evaluate the pdf

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # interval between consecutive x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        x = []
        y = []
        for i in range(n_spaces):
            tmp = min_range + interval * i
            x.append(tmp)
            y.append(self.pdf(tmp))

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')
        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # bug fix: this line previously relabeled axes[0] a second time,
        # leaving the pdf subplot without a y-axis label
        axes[1].set_ylabel('Density')
        plt.show()
        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions.

        Assumes the two distributions are independent, so the variances add.

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution with the combined mean and stdev
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance.

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
r"""
Module which includes logics on feeds' conversion to supported formats. Currently supported formats for
conversion: .html, .pdf, .epub. By default, converted files are stored in ..\Users\Username\rss_reader. However,
this can be changed by passing another directory path to appropriate console arguments --to-html, --to-pdf and
--to--epub. Converted file name will be 'news' followed by file extension.
"""
import logging
import os
import warnings
from pathlib import Path
from typing import List
from ebooklib import epub
from jinja2 import Template
from xhtml2pdf import pisa
from rss_news_reader.rss_builder import Feed
logger = logging.getLogger("rss-news-reader")
class Converter:
    """Class providing public convert method, which converts collected feeds to either of supported formats specified
    by the provided console arguments: --to-html, --to-pdf, --to-epub."""

    def __init__(self, fmt: dict[str, str]):
        # fmt maps a requested format name ("html", "pdf", "epub") to the
        # directory where the converted file should be written.
        self.fmt = fmt
        # Templates and fonts ship next to this module.
        self.module_dir = Path(__file__).parent

    def _get_html(self, **kwargs) -> str:
        """Provides a rendered html-template, which is represented as a string, for future usage in conversion to
        .html or .pdf formats.

        kwargs are passed straight to the jinja2 template context."""
        template = Template(open(Path(self.module_dir, "html_template.jinja2")).read())
        return template.render(**kwargs)

    def _get_xhtml(self, **kwargs) -> str:
        """Provides a rendered xhtml-template, which is represented as a string, for future usage in conversion to
        .epub format.

        kwargs are passed straight to the jinja2 template context."""
        template = Template(open(Path(self.module_dir, "xhtml_template.jinja2")).read())
        return template.render(**kwargs)

    def _to_html(self, feeds: List[Feed]) -> None:
        """Provides functionality to convert feeds to .html format.

        Writes the rendered template to <dir>/news.html; a missing target
        directory is logged as a warning instead of raising."""
        dir_path = self.fmt["html"]
        file_path = Path(dir_path, "news.html")
        try:
            with open(file_path, "w", encoding="utf-8") as result_file:
                result_file.write(
                    self._get_html(
                        feeds=feeds,
                        fonts=str(Path(Path(__file__).parent.resolve(), "fonts")),
                    )
                )
        except FileNotFoundError:
            logger.warning(
                f"Failed to save html file. Seems directory {dir_path} doesn't exist."
            )
        else:
            logger.info(f"Saved html in {file_path}.")

    def _to_pdf(self, feeds: List[Feed]) -> None:
        """Provides functionality to convert feeds to .pdf format.

        Renders the same html template as _to_html and hands it to
        xhtml2pdf; on an unexpected failure the partially written file is
        removed."""
        dir_path = self.fmt["pdf"]
        file_path = Path(dir_path, "news.pdf")
        try:
            # xhtml2pdf emits noisy warnings during rendering; suppress
            # them for the duration of the call.
            with open(file_path, "w+b") as result_file, warnings.catch_warnings():
                warnings.simplefilter("ignore")
                logger.info("Converting feeds to pdf...")
                pisa_status = pisa.CreatePDF(
                    self._get_html(
                        feeds=feeds,
                        fonts=str(Path(Path(__file__).parent.resolve(), "fonts")),
                    ),
                    dest=result_file,
                )
                if pisa_status.err:
                    logger.warning("Some error occurred when converting feeds to pdf!")
        except FileNotFoundError:
            logger.warning(
                f"Failed to save pdf file. Seems directory {dir_path} doesn't exist."
            )
        except Exception as e:
            logger.warning(f"Failed to save pdf file because of {type(e).__name__}")
            # drop the partially written pdf so a broken file isn't left behind
            os.remove(file_path)
        else:
            logger.info(f"Saved pdf in {file_path}.")

    def _to_epub(self, feeds: List[Feed]) -> None:
        """Provides functionality to convert feeds to .epub format.

        Builds one epub chapter per feed item and assembles the book with
        a table of contents mirroring the item titles."""
        dir_path = self.fmt["epub"]
        file_path = Path(dir_path, "news.epub")
        book = epub.EpubBook()
        book.set_identifier("id")
        book.set_title("RSS News")
        book.set_language("en")
        toc = []
        spine = ["nav"]
        for feed in feeds:
            # NOTE(review): file_name restarts at "1.xhtml" for every feed;
            # items from a second feed may collide with the first -- confirm.
            for num, item in enumerate(feed.items, start=1):
                chapter = epub.EpubHtml(title=item.title, file_name=f"{num}.xhtml")
                chapter.content = self._get_xhtml(item=item, language=feed.language)
                book.add_item(chapter)
                spine.append(chapter)
                toc.append(epub.Section(item.title))
                toc.append(chapter)
        book.toc = tuple(toc)
        book.spine = spine
        # standard epub navigation documents (NCX + nav)
        book.add_item(epub.EpubNcx())
        book.add_item(epub.EpubNav())
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            epub.write_epub(file_path, book)
        logger.info(f"Saved epub in {file_path}.")

    def convert(self, feeds: List[Feed]) -> None:
        """Public method to convert accumulated feeds to supported formats depending on passed console arguments."""
        if "html" in self.fmt:
            self._to_html(feeds)
        if "pdf" in self.fmt:
            self._to_pdf(feeds)
        if "epub" in self.fmt:
            self._to_epub(feeds)
from rss_news_reader.xml_parser import Element
from ._rss_models import Feed
from ._url_resolver import URLResolver
class RSSBuilder:
    """Builds an RSS Feed model from the dom object of a parsed XML document."""

    def __init__(self, dom: Element, limit: int, check_urls: bool):
        self.dom = dom
        self.limit = limit
        self.check_urls = check_urls

    def build_feed(self) -> Feed:
        """Assemble and return the Feed model.

        First gathers every URL found in the feed items so URLResolver can
        classify each one (image / audio / other), then builds one item
        model per XML <item>, honouring the configured item limit.
        """

        def counter(bound):
            """Yield 1, 2, ... until bound values have been produced.

            Zipped with an items generator to cap how many are processed.
            """
            value = 1
            while value != bound + 1:
                yield value
                value += 1

        items_urls = {
            number: element.find_urls()
            for number, element in zip(
                counter(self.limit), self.dom.find_all("item", nested=False)
            )
        }
        resolved = URLResolver(items_urls, self.check_urls).resolve_urls()

        def sources(kind, number, skip=()):
            """Collect resolved URL sources of one kind for a single item,
            omitting any source listed in skip."""
            return [
                url.source
                for url in resolved[kind]
                if url.item_num == number and url.source not in skip
            ]

        feed_items = []
        for number, element in zip(
            counter(self.limit), self.dom.find_all("item", nested=False)
        ):
            link = element.get_element_text("link")
            feed_items.append(
                {
                    "title": element.get_element_text("title"),
                    "description": element.get_element_text("description"),
                    "link": link,
                    "author": element.get_element_text("author"),
                    "pubDate": element.get_element_text("pubDate"),
                    "links": {
                        "images": sources("image", number),
                        "audios": sources("audio", number),
                        # the item's own link is not repeated among "others"
                        "others": sources("other", number, skip=(link,)),
                    },
                }
            )
        return Feed(
            title=self.dom.get_element_text("title"),
            description=self.dom.get_element_text("description"),
            link=self.dom.get_element_text("link"),
            image=self.dom.find("image").get_element_text("url"),
            language=self.dom.get_element_text("language"),
            items=feed_items,
        )
import logging
from collections import deque
from ._parser_models import Element
from ._tokenizer import Tokenizer, TokenType, XMLError
logger = logging.getLogger("rss-news-reader")
class Parser:
"""XML parser class exploiting tokenization principle."""
def __init__(self, xml: str):
self.xml = xml
def _tokenize(self, tokenizer: Tokenizer, stack: deque) -> None:
"""Tokenization method. Acts based on the current token_type."""
try:
for token in tokenizer:
if tokenizer.token_type == TokenType.START_TAG:
if len(stack) != 0:
stack[-1].children.append(token)
token.parent = stack[-1]
stack.append(token)
elif tokenizer.token_type == TokenType.END_TAG:
if len(stack) > 1:
try:
while stack.pop().tag_name != token.tag_name:
pass
except IndexError:
# issue with https://feedforall.com/sample.xml
raise XMLError(f"Tag {token} violates nesting rules!")
elif tokenizer.token_type == TokenType.TEXT:
if tokenizer.text and not tokenizer.text.isspace():
stack[-1].children.append(token)
token.parent = stack[-1]
elif tokenizer.token_type == TokenType.CDATA:
# recursively parse CDATA
self._tokenize(tokenizer.cdata_tokenizer, stack)
finally:
tokenizer.xml_io.close()
def parse(self) -> Element:
"""Public method providing an interface for parsing XML."""
tokenizer = Tokenizer(self.xml)
element_stack = deque()
logger.info("Start parsing RSS...")
self._tokenize(tokenizer, element_stack)
logger.info("Successfully parsed RSS document!")
return element_stack.pop() | /rss-news-reader-3.2.5.tar.gz/rss-news-reader-3.2.5/rss_news_reader/xml_parser/_parser.py | 0.706798 | 0.164382 | _parser.py | pypi |
import re
from typing import Optional
from urllib.parse import urlparse
from pydantic import BaseModel
class Attribute(BaseModel):
"""Represents an attribute inside XML tag."""
name: str
# optional, because there may be the following situation: <script async
# src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>, notice async, it has no value
value: Optional[str]
class Element(BaseModel):
"""Represents an element of XML dom tree."""
tag_name: Optional[str]
attributes: Optional[list[Attribute]] = []
parent: Optional["Element"]
children: Optional["list[Element]"] = []
text: Optional[str]
def find_all(self, tag_name: str, *, nested: bool = True):
"""
Generator method yielding all elements having a given tag_name in the subtree of the current element.\n
By default, nested parameter is equal to True, this means that find_all will traverse the whole subtree of the
given element.\n
If nested is set to False, then it is implied that elements to be searched don't contain
other elements with the same tag_name.\n
Summing up, if XML structure is rather complicated, and we are acknowledged that searched elements don't
contain elements with the same tag_name, then providing this option may drastically speed up elements searching.
"""
for child in self.children:
if child.tag_name == tag_name:
yield child
if not nested:
continue
yield from child.find_all(tag_name)
def find(self, tag_name: str) -> "Element":
"""Returns the next element with the given tag_name in the subtree relatively to the current one. Returns an
empty Element if not found."""
for child in self.children:
if child.tag_name == tag_name:
return child
else:
next_child = child.find(tag_name)
try:
if next_child.tag_name == tag_name:
return next_child
except AttributeError:
pass
return Element()
def _find_all_urls(self):
"""Generator private method yielding all URLs in the subtree of the given element. Element's text for URL
presence is explored as well as its attributes."""
for child in self.children:
if re.match("http", child.text):
yield child.text
for attr in child.attributes:
if attr.value and re.match("http", attr.value):
yield attr.value
yield from child._find_all_urls()
def find_urls(self):
"""Public method returning a set of URLs in the subtree of the given element. Whenever several URLs having
different parameters and pointing to the same image encountered, image with higher resolution is chosen."""
urls_dict = {}
# splitting URLs by the groups
for url in self._find_all_urls():
url_no_params = urlparse(url).path
if url_no_params not in urls_dict:
urls_dict[url_no_params] = []
urls_dict[url_no_params].append(url)
return {max(similar_urls_group) for similar_urls_group in urls_dict.values()}
def _find_text(self):
"""Generator method yielding all stripped text occurrences in the subtree of the current element."""
for child in self.children:
if not child.tag_name:
yield child.text.strip()
yield from child._find_text()
def get_element_text(self, tag_name: str):
"""Returns concatenated text occurrences in the subtree of the item specified by the given tag_name, which,
in turn, is situated in the subtree of the current item."""
try:
return " ".join(part for part in self.find(tag_name)._find_text() if part)
except AttributeError:
return ""
def __str__(self):
return f"<{self.tag_name}>"
def __repr__(self):
return f"<{self.tag_name}>"
def __eq__(self, other: "Element"):
if (
self.tag_name == other.tag_name
and self.attributes == other.attributes
and self.text == other.text
):
for self_child, other_child in zip(self.children, other.children):
if self_child != other_child:
return False
return True
else:
return False | /rss-news-reader-3.2.5.tar.gz/rss-news-reader-3.2.5/rss_news_reader/xml_parser/_parser_models.py | 0.879367 | 0.254555 | _parser_models.py | pypi |
import json
from typing import List
from colorama import Back, Fore, Style, init
from pydantic import BaseModel
from rss_news_reader.rss_builder import Feed, Item
class JSONFeeds(BaseModel):
"""Model to handle a list of feeds when converting them to json format."""
feeds: List[Feed]
class NewsPrinter:
"""Class for printing parsed feeds to console. Depending on whether --colorize argument was passed,
news are printed either colored or not."""
def __init__(self, to_json: bool, colorize: bool):
self.to_json = to_json
self.colorize = colorize
@staticmethod
def _to_json(model: BaseModel):
"""Method to convert feeds to json format."""
model = model.json()
parsed_json = json.loads(model)
model = json.dumps(parsed_json, indent=4, ensure_ascii=False)
return model
@staticmethod
def _print_item_stuffing(item: Item):
"""Print the major part of an Item."""
if item.title:
print(f"Title: {item.title}", end="\n\n ")
if item.description:
print(f"{item.description}", end="\n\n ")
if item.link:
print(f"Link: {item.link}", end="\n\n ")
if item.author:
print(f"Author: {item.author}", end="\n\n ")
if item.pubDate:
print(f"Publication date: {item.pubDate}", end="\n\n ")
if any(item.links.values()):
print(f"Links:", end="\n")
for name, named_links in item.links.items():
if named_links:
print(f" {name}:\n ", end="")
for i, link in enumerate(named_links, start=1):
print(f"[{i}]: {link}\n ", end="")
print()
@staticmethod
def _print_uncolored(feeds: List[Feed]):
"""Prints news without colorizing."""
for feed in feeds:
print(f"Feed: {feed.title}\n\n{feed.description}\n\nLink: {feed.link}\n")
if feed.image:
print(f"Image: {feed.image}\n")
for i, item in enumerate(feed.items, start=1):
print(f"Item {i}:", end="\n\n ")
NewsPrinter._print_item_stuffing(item)
print()
@staticmethod
def _print_colored(feeds: List[Feed]):
"""
Prints colorized news.
Attention! Colorization strongly depends on the type of the terminal the final user
utilizes and may look rather clumsy in some of them.
"""
# colorama's init
init()
for feed in feeds:
print(Back.RED + "\n" + Style.RESET_ALL, end="")
print(
Style.NORMAL
+ Fore.LIGHTWHITE_EX
+ Back.RED
+ f"\nFeed: {feed.title}\n"
+ Style.RESET_ALL,
end="",
)
print(
Style.NORMAL
+ Fore.LIGHTWHITE_EX
+ Back.LIGHTBLUE_EX
+ f"\n{feed.description}\n"
+ Style.RESET_ALL,
end="",
)
print(
Style.NORMAL
+ Fore.LIGHTWHITE_EX
+ Back.RED
+ f"\nLink: {feed.link}\n"
+ Style.RESET_ALL,
end="",
)
if feed.image:
print(
Style.NORMAL
+ Fore.LIGHTWHITE_EX
+ Back.RED
+ f"\nImage: {feed.image}\n"
+ Style.RESET_ALL,
end="",
)
for i, item in enumerate(feed.items, start=1):
if i % 2 == 1:
print(
Style.NORMAL
+ Fore.LIGHTWHITE_EX
+ Back.LIGHTBLACK_EX
+ f"\nItem {i}:",
end="\n\n ",
)
else:
print(
Style.NORMAL
+ Fore.LIGHTBLACK_EX
+ Back.LIGHTWHITE_EX
+ f"\nItem {i}:",
end="\n\n ",
)
NewsPrinter._print_item_stuffing(item)
print(Style.RESET_ALL)
def print(self, feeds: List[Feed]):
"""Public method to print obtained feeds to console."""
if self.to_json:
print(NewsPrinter._to_json(JSONFeeds(feeds=feeds)))
elif self.colorize:
NewsPrinter._print_colored(feeds)
else:
NewsPrinter._print_uncolored(feeds) | /rss-news-reader-3.2.5.tar.gz/rss-news-reader-3.2.5/rss_news_reader/printer/_printer.py | 0.708616 | 0.194387 | _printer.py | pypi |
try:
import json
import logging
import os
from reportlab.lib.enums import TA_JUSTIFY
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch
except ImportError as error:
raise SystemExit(f"Error: There was a problem with import of the module/package: {error.name}.")
def get_cached_content_paths(cache_path):
    """Locate the cached article, images and links files of a single feed.

    Args:
        cache_path: cache directory of a single feed

    Returns:
        article_path (str|None): absolute path of the cached article text file
        images_paths (list): paths of the images downloaded for the article
        links_path (str|None): absolute path of the file holding the links
            extracted from the article
    """
    article_path, links_path = None, None
    images_paths = []
    for entry in os.listdir(cache_path):
        entry_path = os.path.abspath(os.path.join(cache_path, entry))
        if entry.endswith("images"):
            # every file inside the images directory is a downloaded image
            images_paths.extend(
                os.path.join(cache_path, 'images', image)
                for image in os.listdir(entry_path)
            )
        elif entry == "links.txt":
            links_path = entry_path
        else:
            # any other file in the cache directory holds the article text
            article_path = entry_path
    return article_path, images_paths, links_path
def get_conversion_data(item):
    """Extract the news feed info given its cached tuple.

    Args:
        item (tuple): pair whose second element is the cached feed's
            dictionary (the first element is the cached date)

    Returns:
        title (str): the title of the feed's news
        date (str): the published date of the news
        article: the news article text
        images_paths (list): paths of the downloaded images
        links_list (list): links extracted from the article
    """
    news = item[1]
    # cache directory of the feed
    cache_directory = news.get("cache_directory")
    article_path, images_paths, links_path = get_cached_content_paths(cache_directory)
    with open(article_path, encoding="utf-8") as article_file:
        article = article_file.read()
    with open(links_path, 'r') as links_file:
        links_list = json.loads(links_file.read())
    return news.get("title"), news.get("date"), article, images_paths, links_list
def convert_pdf(news_tuples_list, input_path):
    """ Converts a list of news into pdf format

    Args:
        news_tuples_list: list of feed info in a tuple structure
        input_path: the path in which the converted pdf should be stored

    Returns:
        None
    """
    doc = SimpleDocTemplate(input_path, pagesize=letter,
                            rightMargin=72, leftMargin=72,
                            topMargin=72, bottomMargin=18)
    styles = getSampleStyleSheet()
    styles.add(ParagraphStyle(name='Justify', alignment=TA_JUSTIFY))
    news_id = 0
    Story = []
    for item in news_tuples_list:
        title, date, article, images_paths, links_list = get_conversion_data(item)
        logging.info("feed's content retrieved for conversion")
        news_id += 1
        # Structuring the pdf file
        Story.append(Spacer(1, 12))
        Story.append(Spacer(1, 12))
        # Fixed doubled colon in the rendered label (was '<b>News Number: </b>:').
        ptext = '<b>News Number</b>: %s' % news_id
        Story.append(Spacer(1, 12))
        Story.append(Paragraph(ptext, styles["Normal"]))
        ptext = '<b>Title</b>: %s' % title
        Story.append(Paragraph(ptext, styles["Normal"]))
        Story.append(Spacer(1, 12))
        ptext = '<b>Date</b>: %s' % date
        Story.append(Paragraph(ptext, styles["Normal"]))
        Story.append(Spacer(1, 12))
        ptext = '<b>Article</b>: %s' % article
        Story.append(Paragraph(ptext, styles["Justify"]))
        Story.append(Spacer(1, 12))
        # Embed each downloaded image at a fixed 2x2 inch size.
        for pic in images_paths:
            im = Image(pic, 2 * inch, 2 * inch)
            Story.append(im)
            Story.append(Spacer(1, 12))
        Story.append(Spacer(1, 12))
        for link in links_list:
            ptext = '<a href = %s >Link</a>' % link
            Story.append(Paragraph(ptext, styles["Normal"]))
    doc.build(Story)
def convert_html(news_tuples_list, input_path):
    """ Converts a list of news into HTML format

    Args:
        news_tuples_list: list of feed info in a tuple structure
        input_path: the path in which the converted HTML should be stored

    Returns:
        None

    Raises:
        SystemExit: if the path does not end in .html or cannot be written.
    """
    news_id = 0
    body = ""
    for item in news_tuples_list:
        news_id += 1
        title, date, article, images_paths, links_list = get_conversion_data(item)
        logging.info("feed's content retrieved for conversion")
        # structuring the html file
        text = "<h2>News Number {News_id}</h2><p><b>Title: </b>{Title}<br><b>Date: </b>{Date}<br><b>Article: " \
               "</b>{Article}<br></p>"
        text = text.format(News_id=news_id, Title=title, Date=date, Article=article)
        for image_path in images_paths:
            text = text + '<img src = {Image_Path} width="200" height="250" style="vertical-align:' \
                          'middle;margin:0px 5px">'.format(Image_Path=image_path)
        n = 0
        for link in links_list:
            n += 1
            text = text + "<p><a href={Link}>link number {N}</a></p>".format(Link=link, N=n)
        body = body + text
    text = '''<html><body>{Text}</body></html>'''.format(Text=body)
    if not input_path.endswith(".html"):
        logging.error("Input path is not ending with .html")
        raise SystemExit('ERROR: Path extension must be .html')
    # creating the file -- the context manager guarantees the handle is
    # closed even if the write itself raises (the bare open/close it
    # replaces leaked the handle on a failed write).
    try:
        with open(input_path, "w", encoding="utf-8") as file:
            file.write(text)
    except FileNotFoundError:
        logging.error("Provided path is not correct.")
        raise SystemExit('Path is not correct. Please Input a correct Path.')
from typing import List, Optional
from pydantic import Field
from rss_parser.models import XMLBaseModel
from rss_parser.models.image import Image
from rss_parser.models.item import Item
from rss_parser.models.text_input import TextInput
from rss_parser.models.types.date import DateTimeOrStr
from rss_parser.models.types.tag import Tag
class RequiredChannelElementsMixin(XMLBaseModel):
    """https://www.rssboard.org/rss-specification#requiredChannelElements.

    Fields the RSS 2.0 spec requires on every <channel>. Despite being
    "required" by the spec, each field defaults to None -- presumably to
    stay lenient when parsing non-conforming feeds; confirm.
    """

    title: Tag[str] = None  # GoUpstate.com News Headlines
    "The name of the channel. It's how people refer to your service. If you have an HTML website that contains " "the same information as your RSS file, the title of your channel should be the same as the title of your " "website."  # noqa
    link: Tag[str] = None  # http://www.goupstate.com/
    "The URL to the HTML website corresponding to the channel."
    description: Tag[str] = None  # The latest news from GoUpstate.com, a Spartanburg Herald-Journal Web site.
    "Phrase or sentence describing the channel."
class OptionalChannelElementsMixin(XMLBaseModel):
    """https://www.rssboard.org/rss-specification#optionalChannelElements.

    Fields the RSS 2.0 spec marks as optional on <channel>.
    """

    # default=[] is safe with pydantic: field defaults are copied per instance.
    items: Optional[List[Tag[Item]]] = Field(alias="item", default=[])
    language: Optional[Tag[str]] = None  # en-us
    "The language the channel is written in. This allows aggregators to group all Italian language sites, " "for example, on a single page."  # noqa
    copyright: Optional[Tag[str]] = None  # Copyright 2002, Spartanburg Herald-Journal # noqa
    "Copyright notice for content in the channel."
    managing_editor: Optional[Tag[str]] = None  # geo@herald.com (George Matesky)
    "Email address for person responsible for editorial content."
    web_master: Optional[Tag[str]] = None  # betty@herald.com (Betty Guernsey)
    "Email address for person responsible for technical issues relating to channel."
    pub_date: Optional[Tag[DateTimeOrStr]] = None  # Sat, 07 Sep 2002 00:00:01 GMT
    "The publication date for the content in the channel. For example, the New York Times publishes on a daily " "basis, the publication date flips once every 24 hours. That's when the pubDate of the channel changes. All " "date-times in RSS conform to the Date and Time Specification of RFC 822, with the exception that the year " "may be expressed with two characters or four characters (four preferred)."  # noqa
    last_build_date: Optional[Tag[DateTimeOrStr]] = None  # Sat, 07 Sep 2002 09:42:31 GMT
    "The last time the content of the channel changed."
    category: Optional[Tag[str]] = None  # Newspapers
    "Specify one or more categories that the channel belongs to. Follows the same rules as the <item.py>-level " "category element."  # noqa
    generator: Optional[Tag[str]] = None  # MightyInHouse Content System v2.3
    "A string indicating the program used to generate the channel."
    docs: Optional[Tag[str]] = None  # https://www.rssboard.org/rss-specification
    "A URL that points to the documentation for the format used in the RSS file. It's probably a pointer to this " "page. It's for people who might stumble across an RSS file on a Web server 25 years from now and wonder what " "it is."  # noqa
    cloud: Optional[Tag[str]] = None  # <cloud domain="rpc.sys.com" protocol="soap"/>
    "Allows processes to register with a cloud to be notified of updates to the channel, implementing a lightweight " "publish-subscribe protocol for RSS feeds."  # noqa
    ttl: Optional[Tag[str]] = None  # 60
    "ttl stands for time to live. It's a number of minutes that indicates how long a channel can be cached before " "refreshing from the source."  # noqa
    image: Optional[Tag[Image]] = None
    "Specifies a GIF, JPEG or PNG image that can be displayed with the channel."
    # NOTE(review): per the RSS 2.0 spec, <rating> is a plain PICS string and
    # <textInput> is a structured element -- the TextInput/str annotations on
    # 'rating' and 'text_input' look swapped; confirm against the spec before
    # changing, since altering the annotations changes pydantic validation.
    rating: Optional[Tag[TextInput]] = None
    "The PICS rating for the channel."
    text_input: Optional[Tag[str]] = None
    "Specifies a text input box that can be displayed with the channel."
    skip_hours: Optional[Tag[str]] = None
    "A hint for aggregators telling them which hours they can skip. This element contains up to 24 <hour> " "sub-elements whose value is a number between 0 and 23, representing a time in GMT, when aggregators, if " "they support the feature, may not read the channel on hours listed in the <skipHours> element. The hour " "beginning at midnight is hour zero."  # noqa
    skip_days: Optional[Tag[str]] = None
    "A hint for aggregators telling them which days they can skip. This element contains up to seven <day> " "sub-elements whose value is Monday, Tuesday, Wednesday, Thursday, Friday, Saturday or Sunday. Aggregators " "may not read the channel during days listed in the <skipDays> element."  # noqa
class Channel(RequiredChannelElementsMixin, OptionalChannelElementsMixin, XMLBaseModel):
    """RSS <channel> element: the required and optional channel fields combined."""
    pass
import warnings
from copy import deepcopy
from json import loads
from math import ceil, floor, trunc
from operator import add, eq, floordiv, ge, gt, index, invert, le, lt, mod, mul, ne, neg, pos, pow, sub, truediv
from typing import Generic, Optional, Type, TypeVar, Union
from pydantic import create_model
from pydantic.generics import GenericModel
from pydantic.json import pydantic_encoder
from rss_parser.models import XMLBaseModel
# Generic payload type carried by Tag[T] (the parsed tag content type).
T = TypeVar("T")
class TagRaw(GenericModel, Generic[T]):
    """
    >>> from rss_parser.models import XMLBaseModel
    >>> class Model(XMLBaseModel):
    ...     number: Tag[int]
    ...     string: Tag[str]
    >>> m = Model(
    ...     number=1,
    ...     string={'@attr': '1', '#text': 'content'},
    ... )
    >>> # Content value is an integer, as per the generic type
    >>> m.number.content
    1
    >>> # But you're still able to use the Tag itself in common operators
    >>> m.number.content + 10 == m.number + 10
    True
    >>> # As it's the case for methods/attributes not found in the Tag itself
    >>> m.number.bit_length()
    1
    >>> # types are NOT the same, however, the interfaces are very similar most of the time
    >>> type(m.number), type(m.number.content)
    (<class 'rss_parser.models.image.Tag[int]'>, <class 'int'>)
    >>> # The attributes are empty by default
    >>> m.number.attributes
    {}
    >>> # But are populated when provided.
    >>> # Note that the @ symbol is trimmed from the beginning, however, camelCase is not converted
    >>> m.string.attributes
    {'attr': '1'}
    >>> # Generic argument types are handled by pydantic - let's try to provide a string for a Tag[int] number
    >>> m = Model(number='not_a_number', string={'@customAttr': 'v', '#text': 'str tag value'})
    Traceback (most recent call last):
        ...
    pydantic.error_wrappers.ValidationError: 1 validation error for Model
    number -> content
        value is not a valid integer (type=type_error.integer)
    """

    # Optional in case of self-closing tags
    content: Optional[T]
    # XML attributes of the tag, with the leading '@' stripped from each key.
    attributes: dict

    def __getattr__(self, item):
        """Forward default getattr for content for simplicity."""
        return getattr(self.content, item)

    def __getitem__(self, key):
        # Delegate item access to the wrapped content.
        return self.content[key]

    def __setitem__(self, key, value):
        # Delegate item assignment to the wrapped content.
        self.content[key] = value

    @classmethod
    def __get_validators__(cls):
        # pydantic v1 custom validation pipeline: normalize the raw value
        # first, then run the standard model validation.
        yield cls.pre_convert
        yield cls.validate

    @classmethod
    def pre_convert(cls, v: Union[T, dict], **kwargs):  # noqa
        """Used to split tag's text with other xml attributes."""
        if isinstance(v, dict):
            data = deepcopy(v)
            # Keys starting with '@' are XML attributes; strip the marker.
            attributes = {k.lstrip("@"): v for k, v in data.items() if k.startswith("@")}
            # If the dict holds anything besides attributes, the content is the
            # '#text' entry, or the whole dict for nested tags; a dict of
            # attributes only (e.g. a self-closing tag) has no content.
            content = data.pop("#text", data) if not len(attributes) == len(data) else None

            return {"content": content, "attributes": attributes}
        return {"content": v, "attributes": {}}

    @classmethod
    def flatten_tag_encoder(cls, v):
        """Encoder that translates Tag objects (dict) to plain .content values (T)."""
        bases = v.__class__.__bases__
        if XMLBaseModel in bases:
            # Can't pass encoder to .dict :/
            return loads(v.json_plain())
        if cls in bases:
            # Collapse the Tag wrapper down to its plain content value.
            return v.content
        # Fall back to pydantic's default JSON encoding for everything else.
        return pydantic_encoder(v)
# Dunder-method name -> plain function implementing it. Consumed below when
# building the concrete Tag class, so Tag instances proxy these unary,
# conversion, comparison and arithmetic operators to their .content value.
_OPERATOR_MAPPING = {
    # Unary
    "__pos__": pos,
    "__neg__": neg,
    "__abs__": abs,
    "__invert__": invert,
    "__round__": round,
    "__floor__": floor,
    "__ceil__": ceil,
    # Conversion
    "__str__": str,
    "__int__": int,
    "__float__": float,
    "__bool__": bool,
    "__complex__": complex,
    "__oct__": oct,
    "__hex__": hex,
    "__index__": index,
    "__trunc__": trunc,
    # Comparison
    "__lt__": lt,
    "__gt__": gt,
    "__le__": le,
    "__eq__": eq,
    "__ne__": ne,
    "__ge__": ge,
    # Arithmetic
    "__add__": add,
    "__sub__": sub,
    "__mul__": mul,
    "__truediv__": truediv,
    "__floordiv__": floordiv,
    "__mod__": mod,
    "__pow__": pow,
}
def _make_proxy_operator(operator):
def f(self, *args):
return operator(self.content, *args)
f.__name__ = operator.__name__
return f
with warnings.catch_warnings():
    # Ignoring pydantic's warnings when inserting dunder methods (this is not a field so we don't care)
    warnings.filterwarnings("ignore", message="fields may not start with an underscore")
    # Build the public Tag class: TagRaw plus proxy dunder methods generated
    # from _OPERATOR_MAPPING, so Tag[T] instances behave like their content.
    Tag: Type[TagRaw] = create_model(
        "Tag",
        __base__=(TagRaw, Generic[T]),
        **{method: _make_proxy_operator(operator) for method, operator in _OPERATOR_MAPPING.items()},
    )
import sqlite3
import datetime
from .verbosity import method_verbosity
def adapt_date_iso(val):
    """Adapt datetime.date to ISO 8601 date."""
    # NOTE(review): to take effect this must be registered with
    # sqlite3.register_adapter(datetime.date, adapt_date_iso) -- no
    # registration is visible in this module; confirm it happens elsewhere.
    return val.isoformat()
def convert_date(val):
    """Convert an ISO 8601 date to a datetime.date object.

    Args:
        val: ISO 8601 date as str or bytes. sqlite3 passes column values to
            registered converters as bytes, while date.fromisoformat() only
            accepts str -- so bytes input is decoded first.

    Returns:
        datetime.date parsed from the input.
    """
    if isinstance(val, bytes):
        val = val.decode()
    return datetime.date.fromisoformat(val)
class DBHandler:
    """SQLite-backed cache of RSS feed entries.

    Stores one row per entry (title, published date, link, feed title) in a
    local 'rss.db' file and serves them back for a requested date.
    """

    def __init__(self, verbose=False):
        # The database file is created in the current working directory.
        self.con = sqlite3.connect('rss.db')
        self.cursor = self.con.cursor()
        self.verbose = verbose

    @method_verbosity
    def _create_table(self):
        '''
        Creates table if needed
        '''
        # 'published' is declared with type "date"; detect_types is not passed
        # to sqlite3.connect(), so values come back as stored text unless
        # converters are enabled elsewhere -- TODO confirm.
        self.cursor.execute('CREATE TABLE IF NOT EXISTS entries (title TEXT, published date, link TEXT, feed TEXT)')

    @method_verbosity
    def write(self, entries, feed_title):
        '''
        Inserts entries info to database.
        Checks if entry is already in db by comparing titles.
        If entry is already in db, does nothing
        '''
        self._create_table()
        # Fetch existing titles so already-cached entries can be skipped;
        # deduplication is by 'Title' only.
        query = self.cursor.execute('SELECT * FROM entries')
        entries_from_db = query.fetchall()
        titles = [entry[0] for entry in entries_from_db]
        entries_to_be_written = []
        for entry in entries:
            if entry['Title'] not in titles:
                entries_to_be_written.append((entry['Title'], entry['Date'], entry['Link'], feed_title))
        if entries_to_be_written:
            self.cursor.executemany("INSERT INTO entries VALUES(?, ?, ?, ?)", entries_to_be_written)
            self.con.commit()

    @method_verbosity
    def read_from_db(self, date_string):
        '''
        Selects rows from table for given date.
        Structures and returns ready to be outputed data
        '''
        # Parse the CLI-provided YYYYMMDD string; raises ValueError on bad input.
        date = datetime.datetime.strptime(date_string, '%Y%m%d').date()
        query = self.cursor.execute('SELECT * FROM entries WHERE published = :date', {'date': date})
        entries = query.fetchall()
        if entries:
            # All rows for one date are assumed to belong to a single feed:
            # the feed name is taken from the first row -- TODO confirm.
            feed = entries[0][3]
            output_entries = [
                {
                    'Title': entry[0],
                    'Date': entry[1],
                    'Link': entry[2]
                }
                for entry in entries
            ]
            return {'Feed': feed, 'Entries': output_entries}
        else:
            raise ValueError('No cached entry for given date')
# RSS Reader
## Description
Command-line RSS reader utility implemented in Python
## Installation
### 1. Install from PyPI repository
Run ```pip install rss-reader-sardor-irgashev```
### 2. Clone from GitLab
1. Clone the repository
2. Install necessary requirements by running ```pip install -r requirements.txt```
## Interface
The utility provides the following interface:
```shell
usage: rss_reader.py [-h] [--version] [--json] [--verbose] [--limit LIMIT]
source
Pure Python command-line RSS reader.
positional arguments:
source RSS URL
optional arguments:
-h, --help show this help message and exit
--version Print version info
--json Print result as JSON in stdout
--verbose Outputs verbose status messages
--limit LIMIT Limit news topics if this parameter provided
--date DATE News publishing date
--to-html HTML Convert news to HTML
--to-pdf PDF Convert news to PDF
--colorize Enables colorized output
```
## Usage Examples
```
> python3 rss_reader.py http://rss.cnn.com/rss/edition_world.rss --limit 1
```
```shell
Feed Title: CNN.com - RSS Channel - World
News Title: China's Weibo shows user locations to combat 'bad behavior'
Date Published: Thu, 28 Apr 2022 15:55:04 GMT
Description: Weibo, China's equivalent of Twitter, told users on Thursday it would start to publish their IP locations on their account pages and when they post comments, in a bid to combat "bad behavior" online.
Link: https://www.cnn.com/2022/04/28/tech/weibo-user-location-bad-behavior/index.html
Image: https://cdn.cnn.com/cnnnext/dam/assets/220428104403-weibo-app-china-file-restricted-super-169.jpg
====================================================================================
```
```
> python3 rss_reader.py http://rss.cnn.com/rss/edition_world.rss --limit 1 --json
```
```shell
[
{
"Feed Title": "CNN.com - RSS Channel - World",
"Feed Source": "http://rss.cnn.com/rss/edition_world.rss",
"News Item": {
"News Title": "China's Weibo shows user locations to combat 'bad behavior'",
"Publication Date": "Thu, 28 Apr 2022 15:55:04 GMT",
"Description": "Weibo, China's equivalent of Twitter, told users on Thursday it would start to publish their IP locations on their account pages and when they post comments, in a bid to combat \"bad behavior\" online.",
"Link": "https://www.cnn.com/2022/04/28/tech/weibo-user-location-bad-behavior/index.html",
"Image Link": "https://cdn.cnn.com/cnnnext/dam/assets/220428104403-weibo-app-china-file-restricted-super-169.jpg"
}
}
]
```
```
> python3 rss_reader.py http://rss.cnn.com/rss/edition_world.rss --date 20220428 --limit 1
```
```shell
Feed Title: CNN.com - RSS Channel - World
News Title: China's Weibo shows user locations to combat 'bad behavior'
Date Published: Thu, 28 Apr 2022 15:55:04 GMT
Description: Weibo, China's equivalent of Twitter, told users on Thursday it would start to publish their IP locations on their account pages and when they post comments, in a bid to combat "bad behavior" online.
Link: https://www.cnn.com/2022/04/28/tech/weibo-user-location-bad-behavior/index.html
Image: https://cdn.cnn.com/cnnnext/dam/assets/220428104403-weibo-app-china-file-restricted-super-169.jpg
====================================================================================
```
```
> python3 rss_reader.py http://rss.cnn.com/rss/edition_world.rss --date 20220428 --limit 1 --json
```
```shell
[
{
"Feed Title": "CNN.com - RSS Channel - World",
"Feed Source": "http://rss.cnn.com/rss/edition_world.rss",
"News Item": {
"News Title": "China's Weibo shows user locations to combat 'bad behavior'",
"Publication Date": "Thu, 28 Apr 2022 15:55:04 GMT",
"Description": "Weibo, China's equivalent of Twitter, told users on Thursday it would start to publish their IP locations on their account pages and when they post comments, in a bid to combat \"bad behavior\" online.",
"Link": "https://www.cnn.com/2022/04/28/tech/weibo-user-location-bad-behavior/index.html",
"Image Link": "https://cdn.cnn.com/cnnnext/dam/assets/220428104403-weibo-app-china-file-restricted-super-169.jpg"
}
}
]
```
```
> python3 rss_reader.py --date 20220428 --limit 1
```
```shell
Feed Title: CNN.com - RSS Channel - World
News Title: China's Weibo shows user locations to combat 'bad behavior'
Date Published: Thu, 28 Apr 2022 15:55:04 GMT
Description: Weibo, China's equivalent of Twitter, told users on Thursday it would start to publish their IP locations on their account pages and when they post comments, in a bid to combat "bad behavior" online.
Link: https://www.cnn.com/2022/04/28/tech/weibo-user-location-bad-behavior/index.html
Image: https://cdn.cnn.com/cnnnext/dam/assets/220428104403-weibo-app-china-file-restricted-super-169.jpg
====================================================================================
```
```
> python3 rss_reader.py --date 20220428 --json --limit 1
```
```shell
[
{
"Feed Title": "CNN.com - RSS Channel - World",
"Feed Source": "http://rss.cnn.com/rss/edition_world.rss",
"News Item": {
"News Title": "China's Weibo shows user locations to combat 'bad behavior'",
"Publication Date": "Thu, 28 Apr 2022 15:55:04 GMT",
"Description": "Weibo, China's equivalent of Twitter, told users on Thursday it would start to publish their IP locations on their account pages and when they post comments, in a bid to combat \"bad behavior\" online.",
"Link": "https://www.cnn.com/2022/04/28/tech/weibo-user-location-bad-behavior/index.html",
"Image Link": "https://cdn.cnn.com/cnnnext/dam/assets/220428104403-weibo-app-china-file-restricted-super-169.jpg"
}
}
]
```
```
> python3 rss_reader.py http://rss.cnn.com/rss/edition_world.rss --to-pdf ./ --to-html ./
```
```shell
Creates both HTML and PDF files at the specified location
```
#### Alternatives with installation from PyPI
```
> rss_reader http://rss.cnn.com/rss/edition_world.rss --limit 1
```
```shell
Feed Title: CNN.com - RSS Channel - World
News Title: China's Weibo shows user locations to combat 'bad behavior'
Date Published: Thu, 28 Apr 2022 15:55:04 GMT
Description: Weibo, China's equivalent of Twitter, told users on Thursday it would start to publish their IP locations on their account pages and when they post comments, in a bid to combat "bad behavior" online.
Link: https://www.cnn.com/2022/04/28/tech/weibo-user-location-bad-behavior/index.html
Image: https://cdn.cnn.com/cnnnext/dam/assets/220428104403-weibo-app-china-file-restricted-super-169.jpg
====================================================================================
```
```
> rss_reader http://rss.cnn.com/rss/edition_world.rss --limit 1 --json
```
```shell
[
{
"Feed Title": "CNN.com - RSS Channel - World",
"Feed Source": "http://rss.cnn.com/rss/edition_world.rss",
"News Item": {
"News Title": "China's Weibo shows user locations to combat 'bad behavior'",
"Publication Date": "Thu, 28 Apr 2022 15:55:04 GMT",
"Description": "Weibo, China's equivalent of Twitter, told users on Thursday it would start to publish their IP locations on their account pages and when they post comments, in a bid to combat \"bad behavior\" online.",
"Link": "https://www.cnn.com/2022/04/28/tech/weibo-user-location-bad-behavior/index.html",
"Image Link": "https://cdn.cnn.com/cnnnext/dam/assets/220428104403-weibo-app-china-file-restricted-super-169.jpg"
}
}
]
```
```
> rss_reader http://rss.cnn.com/rss/edition_world.rss --date 20220428 --limit 1
```
```shell
Feed Title: CNN.com - RSS Channel - World
News Title: China's Weibo shows user locations to combat 'bad behavior'
Date Published: Thu, 28 Apr 2022 15:55:04 GMT
Description: Weibo, China's equivalent of Twitter, told users on Thursday it would start to publish their IP locations on their account pages and when they post comments, in a bid to combat "bad behavior" online.
Link: https://www.cnn.com/2022/04/28/tech/weibo-user-location-bad-behavior/index.html
Image: https://cdn.cnn.com/cnnnext/dam/assets/220428104403-weibo-app-china-file-restricted-super-169.jpg
====================================================================================
```
```
> rss_reader http://rss.cnn.com/rss/edition_world.rss --date 20220428 --limit 1 --json
```
```shell
[
{
"Feed Title": "CNN.com - RSS Channel - World",
"Feed Source": "http://rss.cnn.com/rss/edition_world.rss",
"News Item": {
"News Title": "China's Weibo shows user locations to combat 'bad behavior'",
"Publication Date": "Thu, 28 Apr 2022 15:55:04 GMT",
"Description": "Weibo, China's equivalent of Twitter, told users on Thursday it would start to publish their IP locations on their account pages and when they post comments, in a bid to combat \"bad behavior\" online.",
"Link": "https://www.cnn.com/2022/04/28/tech/weibo-user-location-bad-behavior/index.html",
"Image Link": "https://cdn.cnn.com/cnnnext/dam/assets/220428104403-weibo-app-china-file-restricted-super-169.jpg"
}
}
]
```
```
> rss_reader --date 20220428 --limit 1
```
```shell
Feed Title: CNN.com - RSS Channel - World
News Title: China's Weibo shows user locations to combat 'bad behavior'
Date Published: Thu, 28 Apr 2022 15:55:04 GMT
Description: Weibo, China's equivalent of Twitter, told users on Thursday it would start to publish their IP locations on their account pages and when they post comments, in a bid to combat "bad behavior" online.
Link: https://www.cnn.com/2022/04/28/tech/weibo-user-location-bad-behavior/index.html
Image: https://cdn.cnn.com/cnnnext/dam/assets/220428104403-weibo-app-china-file-restricted-super-169.jpg
====================================================================================
```
```
> rss_reader --date 20220428 --json --limit 1
```
```shell
[
{
"Feed Title": "CNN.com - RSS Channel - World",
"Feed Source": "http://rss.cnn.com/rss/edition_world.rss",
"News Item": {
"News Title": "China's Weibo shows user locations to combat 'bad behavior'",
"Publication Date": "Thu, 28 Apr 2022 15:55:04 GMT",
"Description": "Weibo, China's equivalent of Twitter, told users on Thursday it would start to publish their IP locations on their account pages and when they post comments, in a bid to combat \"bad behavior\" online.",
"Link": "https://www.cnn.com/2022/04/28/tech/weibo-user-location-bad-behavior/index.html",
"Image Link": "https://cdn.cnn.com/cnnnext/dam/assets/220428104403-weibo-app-china-file-restricted-super-169.jpg"
}
}
]
```
```
> rss_reader http://rss.cnn.com/rss/edition_world.rss --to-pdf ./ --to-html ./
```
```shell
Creates both HTML and PDF files at the specified location
```
## Feed Sources
1. https://moxie.foxnews.com/feedburner/latest.xml
2. https://www.scmp.com/rss/5/feed
3. http://rss.cnn.com/rss/edition_world.rss
4. https://globalnews.ca/feed/
5. https://www.washingtontimes.com/rss/headlines/news/world/ | /rss-reader-sardor-irgashev-5.0.0.tar.gz/rss-reader-sardor-irgashev-5.0.0/README.md | 0.568416 | 0.760917 | README.md | pypi |
import os
import sqlite3
import sys
from datetime import datetime
from logging import getLogger
from typing import List
# Directory containing this module; the SQLite database file is created here.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# Root logger; configuration is left to the application entry point.
logger = getLogger()
class DatabaseManager:
    """Represents Database management"""

    def __init__(self, table_name: str, table_cols: List[str], unique_constraint: str) -> None:
        """DatabaseManager constructor

        Args:
            table_name: Name of the to be created database table
            table_cols: Column names of the to be created database tables
            unique_constraint: Unique constraint column name

        Returns:
            None
        """
        self._table_name = table_name
        self._table_columns = table_cols
        # Database file lives next to this module, named after the table.
        self._db_connection = sqlite3.connect(os.path.join(ROOT_DIR, f'{table_name}.db'))
        self._cursor = self._db_connection.cursor()
        # Pre-built SQL fragments; identifiers come from code, not user input,
        # so interpolating them into statements is acceptable here.
        self._columns = ', '.join(self._table_columns)
        self._placeholders = ':' + ', :'.join(self._table_columns)
        self._unique = unique_constraint
        # Lazy %-style logging arguments avoid formatting when DEBUG is off.
        logger.debug('Connected to DB.')

    def _create_table(self) -> None:
        """Creates table in database

        Returns:
            None
        """
        query = f"CREATE TABLE IF NOT EXISTS {self._table_name} ({self._columns}, unique ({self._unique}))"
        self._cursor.execute(query)
        logger.debug('Table %s operating.', self._table_name)

    def insert_into_table(self, content: List[dict]) -> None:
        """Inserts rows into database

        Args:
            content: List of dictionaries containing to be inserted data

        Returns:
            None
        """
        self._create_table()
        # OR IGNORE skips rows violating the table's unique constraint.
        query = f"INSERT OR IGNORE INTO {self._table_name} ({self._columns}) VALUES ({self._placeholders})"
        self._cursor.executemany(query, content)
        self._db_connection.commit()

    def _query_db(self, date: str, source: str) -> None:
        """Performs SELECT queries on database, leaving results on the cursor

        Args:
            date: News publishing date
            source: Feed source

        Returns:
            None
        """
        if date is not None:
            # Match the stored 'Day, DD Mon YYYY ...' date prefix with LIKE.
            pub_date = datetime.strptime(date, '%Y%m%d').strftime('%a, %d %b %Y') + '%'
            if source != '':
                query = f"""
                SELECT * FROM {self._table_name} WHERE feed_url=:source and publication_date like :publication_date"""
                self._cursor.execute(query, {'publication_date': pub_date, 'source': source})
                logger.debug('Query performed to fetch rows according to source and publishing date.')
            else:
                query = f"SELECT * FROM {self._table_name} WHERE publication_date like :publication_date"
                self._cursor.execute(query, {'publication_date': pub_date})
                logger.debug('Query performed to fetch rows according to publishing date.')
        elif source != '':
            query = f"SELECT * FROM {self._table_name} WHERE feed_url=:source"
            self._cursor.execute(query, {'source': source})
            logger.debug('Query performed to fetch rows according to source.')
        else:
            sys.exit("Neither source nor date provided to perform query. Program terminated. Try again.")

    def retrieve_from_db(self, date: str, source: str, limit: int) -> List[tuple]:
        """Retrieves records from database according to arguments specified

        Args:
            date: News publishing date
            source: Feed source
            limit: Limit of the to be fetched records

        Returns:
            Fetched records
        """
        self._query_db(date, source)
        if limit is not None:
            return self._cursor.fetchmany(limit)
        else:
            return self._cursor.fetchall()
import os
import sys
from argparse import ArgumentParser, ArgumentTypeError, Namespace
from datetime import datetime
from logging import getLogger
from _version import __version__
# Root logger -- NOTE(review): getLogger(__name__) is the usual per-module
# convention; using the root logger here may be deliberate, confirm.
logger = getLogger()
def positive_int(value: str) -> int:
    """Validate that a command-line argument is a positive integer.

    Args:
        value: Command-line-provided argument

    Returns:
        Integer representation of the provided argument

    Raises:
        ArgumentTypeError: If the integer value is not strictly positive
        SystemExit: If the value cannot be converted to an integer
    """
    try:
        number = int(value)
    except (ValueError, TypeError):
        sys.exit('Limit can take integer value only! Program terminated. Try again.')
    # ArgumentTypeError is not a ValueError/TypeError, so raising it here is
    # equivalent to raising it inside the try block above.
    if number <= 0:
        raise ArgumentTypeError('Limit can take positive integer value only! Program terminated. Try again.')
    return number
def valid_date(date: str) -> str:
    """Validate a YYYYMMDD command-line date argument.

    Args:
        date: Command-line-provided argument for date

    Returns:
        The date re-rendered in YYYYMMDD form

    Raises:
        ArgumentTypeError: If the value does not parse as YYYYMMDD
    """
    try:
        parsed = datetime.strptime(date, "%Y%m%d")
    except ValueError:
        raise ArgumentTypeError('Argument is outside of defined ranges. Program terminated. Try again.')
    # Formatting a successfully parsed value cannot fail.
    return parsed.strftime("%Y%m%d")
def valid_path(path: str) -> str:
    """Checks whether the provided argument is a valid path

    Args:
        path: Command-line-provided argument for path

    Returns:
        String represented path

    Raises:
        NotADirectoryError: If provided 'path' is not a directory
    """
    # NOTE(review): os.path.dirname(path) only checks that the string contains
    # a directory component; it does not verify that the directory exists, and
    # it rejects a bare filename such as "out.html" even though that is a
    # usable relative path. os.path.isdir() on the dirname may be the real
    # intent -- confirm before changing, as callers rely on current behavior.
    if os.path.dirname(path):
        return path
    else:
        raise NotADirectoryError(f"{path} is not a directory. Program terminated. Try again.")
def handle_args() -> Namespace:
    """Parses command-line arguments

    Returns:
        Argument-attributes-populated namespace
    """
    parser = ArgumentParser(description="Pure Python command-line RSS reader.")
    # 'source' is optional (nargs='?') so the reader can also run purely
    # from the local news cache when only --date is given.
    parser.add_argument('source', nargs='?', default='', help='RSS URL')
    parser.add_argument('--version', action='version', version=f'Version {__version__}', help='Print version info')
    parser.add_argument('--json', action='store_true', help='Print result as JSON in stdout')
    parser.add_argument('--verbose', action='store_true', help='Outputs verbose status messages')
    # The type= callables below raise ArgumentTypeError on bad input, which
    # argparse converts into a usage error message.
    parser.add_argument('--limit', type=positive_int, help='Limit news topics if this parameter provided')
    parser.add_argument('--date', type=valid_date, help='News publishing date')
    # dest= maps '--to-html'/'--to-pdf' onto args.html / args.pdf.
    parser.add_argument('--to-html', type=valid_path, dest='html', help='Convert news to HTML')
    parser.add_argument('--to-pdf', type=valid_path, dest='pdf', help='Convert news to PDF')
    parser.add_argument('--colorize', action='store_true', help='Enables colorized output')
    return parser.parse_args()
Implemented Python RSS-reader using python 3.9.
RSS reader is a command-line utility that receives RSS URL and prints results in a human-readable format.
---
Quick start
---
>>> rss_reader https://people.onliner.by/feed --limit 1
---------------------------------- Start Program ----------------------------------
[INFO] Receiving the news was successful
Feed source: https://people.onliner.by/feed
Feed: Люди Onlíner
-------------------------------------- News 1 -------------------------------------
Title: Работник случайно слил в канализацию полторы тонны полуфабриката белорусског
о коньяка. Отправлен под суд
Date: Fri, 29 Oct 2021 15:00:13 +0300
Link: https://people.onliner.by/2021/10/29/vylil-v-kanalizaciyu-poltory-tonny-belor
usskogo-konyaka
Category: Социум
Description: Грустная история случилась весной прошлого года на минском «Кристалле»
: мастер случайно отправил в канализацию 1622,13 литра коньяка. На работника завели
уголовное дело за служебную халатность. Известно про этот случай стало после того,
как было опубликовано решение суда.Читать далее…
Media Object: https://content.onliner.by/news/thumbnail/a4be8e39b5616a231de7fa7960d
81047.jpeg
Extra Links: https://people.onliner.by/2021/10/29/vylil-v-kanalizaciyu-poltory-tonn
y-belorusskogo-konyaka
-----------------------------------------------------------------------------------
----------------------------------- Stop Program ----------------------------------
---
## Contents
***
1. [Installation](#Installation)
2. [Usage](#Usage)
3. [Format converter](#Format-converter)
* [Converter to PDF](#Converter-to-PDF)
* [Converter to HTML](#Converter-to-HTML)
4. [Storage](#Storage)
* [Format of the storage](#Format-of-the-storage)
5. [Tests](#Tests)
6. [What's in the future](#What's-in-the-future)
---
## Installation
```
pip install rss-readerAPI
```
or make a clone of the repository:
```
>>> git clone https://github.com/Aleksey-Mikh/CLI_util_RSS_reader.git
```
After that, you need to go to the CLI_util directory:
```
>>> cd your_path/CLI_util_RSS_reader/
```
and install the utility:
```
>>> pip install -e .
```
Don't forget the dot at the end!
Now you can use the utility in two ways:
```
>>> python rss_reader.py https://people.onliner.by/feed --limit 1
```
or
```
>>> rss_reader https://people.onliner.by/feed --limit 1
```
If needed, add the path to the repo to the environment variable $PYTHONPATH.
The script will be available from everywhere.
Linux:
```
>>> export PYTHONPATH="${PYTHONPATH}:<path to repo>"
```
Windows:
```
>>> set PYTHONPATH=%PYTHONPATH%;<path to repo>
```
---
## Usage
You can see the information about the utility using the following command:
>>> rss_reader --help
usage: rss_reader [-h] [--version] [--json] [--verbose] [--limit LIMIT] [--date DATE] [--to-html TO_HTML]
[--to-pdf TO_PDF] [--colorize]
[source]
Pure Python command-line RSS reader.
positional arguments:
source RSS URL
optional arguments:
-h, --help show this help message and exit
--version Print version info
--json Print result as JSON in stdout
--verbose Outputs verbose status messages
--limit LIMIT Limit news topics if this parameter provided
--date DATE Take a date in %Y%m%d format. Example: 20191206
--to-html TO_HTML This argument receives the path where new HTML file will be saved.
--to-pdf TO_PDF This argument receives the path where new PDF file will be saved.
--colorize That will print the result of the utility in colorized mode.
You can see the version of the utility:
```
>>> rss_reader --version
Version 5.0.0
```
Argument `--version` will output the version despite other arguments:
```
>>> rss_reader https://people.onliner.by/feed --limit 1 --version
Version 5.0.0
```
When you provide the `source` argument, the page is fetched from the Internet and parsed.
If you specify the `--date some_date` argument, the news is searched for in the local storage
and an Internet connection is not required.
When you enter only `--date some_date`, the news will be searched for by the specified date.
If the news is not found, an error will be returned:
```
>>> rss_reader --date 20210810
------------------------------------------ Start Program ------------------------------------------
[ERROR] No news was found for this date - 2021-08-10
------------------------------------------- Stop Program ------------------------------------------
```
If you enter `source` and `--date some_date`, the utility will search for news matching both the
given date and the given site. If the news is not found, an error will be returned:
```
>>> rss_reader https://people.onliner.by/feed --date 20210810
-------------------------------------------------- Start Program -------------------------------------------------
[ERROR] No news was founded for this date: 20210810, and this source: https://people.onliner.by/feed
-------------------------------------------------- Stop Program --------------------------------------------------
```
If you want more information about how the program works you can enter the argument `--verbose`:
>>> rss_reader https://people.onliner.by/feed --limit 4 --verbose
---------------------------------------- Start Program ---------------------------------------
[INFO] Start Scrapping
[INFO] Count of news: 4
[INFO] News received [1/4], percent of execution program=25%
[INFO] News received [2/4], percent of execution program=50%
[INFO] News received [3/4], percent of execution program=75%
[INFO] News received [4/4], percent of execution program=100%
[INFO] Receiving the news was successful
[INFO] Stop Scrapping
[INFO] Output news in standard format
Feed source: https://people.onliner.by/feed
Feed: Люди Onlíner
------------------------------------------- News 1 -------------------------------------------
Title: Слишком тепло: в выходные до +18
Date: Fri, 29 Oct 2021 18:00:23 +0300
Link: https://people.onliner.by/2021/10/29/v-vyxodnye-do-18
Category: Социум
Description: После слишком холодного сентября наступает чересчур теплый ноябрь. Не уверены, чт
о последовательность именно такая, но эти месяцы изначально перепутали местами. В общем, на за
втра Белгидромет не исключает 18 градусов, это гораздо теплее нормы.Читать далее…
Media Object: https://content.onliner.by/news/thumbnail/847f53a5b4f75a81f4312d021825dab9.jpeg
Extra Links: https://people.onliner.by/2021/10/29/v-vyxodnye-do-18
----------------------------------------------------------------------------------------------
...
------------------------------------------- News 4 -------------------------------------------
Title: Работник случайно слил в канализацию полторы тонны полуфабриката белорусского коньяка.
Отправлен под суд
Date: Fri, 29 Oct 2021 15:00:13 +0300
Link: https://people.onliner.by/2021/10/29/vylil-v-kanalizaciyu-poltory-tonny-belorusskogo-kon
yaka
Category: Социум
Description: Грустная история случилась весной прошлого года на минском «Кристалле»: мастер сл
учайно отправил в канализацию 1622,13 литра коньяка. На работника завели уголовное дело за слу
жебную халатность. Известно про этот случай стало после того, как было опубликовано решение су
да.Читать далее…
Media Object: https://content.onliner.by/news/thumbnail/a4be8e39b5616a231de7fa7960d81047.jpeg
Extra Links: https://people.onliner.by/2021/10/29/vylil-v-kanalizaciyu-poltory-tonny-belorussk
ogo-konyaka
----------------------------------------------------------------------------------------------
---------------------------------------- Stop Program ----------------------------------------
When you enter the arguments `--to-pdf path` and `--to-html path`,
conversion to the specified format will be executed in addition to the output.
When you enter the `--json` argument, the console will output data in JSON format:
[
{
"channel_title": channel_title,
"source": source,
},
{
"title": title,
"date": pub_date,
"link": link,
"author": author,
"category": list_categories,
"description": description,
"more_description": content_encoded,
"comments": comments,
"media_object": enclosure,
"extra_links": guid,
"source_feed": list_source,
},
]
You can find more information [here](https://github.com/Aleksey-Mikh/CLI_util_RSS_reader/tree/main/cool_project/cervices).
---
## Format converter
The utility supports the conversion of news into the following formats: HTML, PDF.
### Converter to PDF
When you enter the `--to-pdf path` argument, the `feed.pdf` file will be saved to the given path.
You can find more information [here](https://github.com/Aleksey-Mikh/CLI_util_RSS_reader/tree/main/cool_project/conversion_to_format).
### Converter to HTML
When you enter the `--to-html path` argument, the `feed.html` file will be saved to the given path.
You can find more information [here](https://github.com/Aleksey-Mikh/CLI_util_RSS_reader/tree/main/cool_project/conversion_to_format).
---
## Storage
The utility uses caching of news with the ability to output them without an Internet connection.
### Format of the storage:
storage/
2021-09/
2021-09-18/
2021-10-18_https___lenta_ru_rss.json
2021-09-19/
2021-10-19_https___lenta_ru_rss.json
2021-10/
...
You can find more information [here](https://github.com/Aleksey-Mikh/CLI_util_RSS_reader/tree/main/cool_project/data_storage).
---
## Tests
>>> pytest .\tests\ --cov=rss_reader --cov=.\cool_project\
======================================== test session starts =========================================
platform win32 -- Python 3.9.7, pytest-6.2.5, py-1.10.0, pluggy-1.0.0
rootdir: C:\Users\lehado01\PycharmProjects\EPAM_final_task\Homework\Aleksey_Mikhalkevich\CLI_util
plugins: cov-3.0.0, mock-3.6.1
collected 78 items
tests\test_data_storage\test_storage_managers.py ....................... [ 29%]
tests\test_data_storage\test_working_with_storage.py ... [ 33%]
tests\test_rss_reader\test_rss_reader.py ........ [ 43%]
tests\tests_conversion_to_format\test_conversion_to_html.py ....... [ 52%]
tests\tests_conversion_to_format\test_conversion_to_pdf.py ....... [ 61%]
tests\tests_serializers\test_serializers.py ............ [ 76%]
tests\tests_services\test_data_output.py ........ [ 87%]
tests\tests_services\test_decorators.py ....... [ 96%]
tests\tests_services\test_print_functions.py ... [100%]
----------- coverage: platform win32, python 3.9.7-final-0 -----------
Name Stmts Miss Cover
-----------------------------------------------------------------------------
cool_project\__init__.py 0 0 100%
cool_project\cervices\__init__.py 0 0 100%
cool_project\cervices\data_output.py 41 9 78%
cool_project\cervices\decorators.py 60 0 100%
cool_project\cervices\print_functions.py 6 0 100%
cool_project\conversion_to_format\__init__.py 0 0 100%
cool_project\conversion_to_format\conversion_to_html.py 38 2 95%
cool_project\conversion_to_format\conversion_to_pdf.py 102 4 96%
cool_project\data_storage\__init__.py 0 0 100%
cool_project\data_storage\storage_managers.py 197 0 100%
cool_project\data_storage\working_with_storage.py 53 0 100%
cool_project\serializers\__init__.py 0 0 100%
cool_project\serializers\serializers.py 132 3 98%
rss_reader.py 100 11 89%
-----------------------------------------------------------------------------
TOTAL 729 29 96%
========================================= 78 passed in 2.08s =========================================
---
## What's in the future
In the future, I'm going to upload the utility to PYPI and add more formats for conversions.
## Author
[GitHub](https://github.com/Aleksey-Mikh)
[linkedin](https://www.linkedin.com/in/aliaksei-mikhalkevich-b740b0201/)
mail - lehado67@gmail.com | /rss_readerCLI-5.0.1.tar.gz/rss_readerCLI-5.0.1/README.md | 0.657868 | 0.876052 | README.md | pypi |
from pathlib import Path
from fpdf import FPDF
from cool_project.cervices.print_functions import info_print, error_print
from project_settings import FILE_NAME_PDF
class PDF(FPDF):
    """
    FPDF subclass that renders the collected feed data as a PDF document.
    """

    def _get_item(self, news):
        """
        Render a single news item into the document.
        :param news: dict with the news fields (title, date, link, ...)
        """
        if news["title"]:
            self.multi_cell(0, 5, f"Title: {news['title']}")
            self.ln()
        if news["date"]:
            self.multi_cell(0, 5, f"date of publication: {news['date']}")
            self.ln()
        if news["link"]:
            self.multi_cell(0, 5, f"Link: {news['link']}")
            self.ln()
        if news["author"]:
            self.multi_cell(0, 5, f"Author: {news['author']}")
            self.ln()
        if news["category"]:
            if self.is_list(news["category"]):
                # Print the heading once, then one indented line per value.
                # (Previously the heading was emitted again for every value.)
                self.cell(0, 5, "Categories: ", ln=1)
                for category in news["category"]:
                    self.multi_cell(0, 5, f"{' ' * 5}{category}")
            else:
                self.multi_cell(0, 5, f"Category: {news['category']}")
            self.ln()
        if news["description"]:
            self.multi_cell(0, 5, f"Description: {news['description']}")
            self.ln()
        if news["more_description"]:
            self.multi_cell(
                0, 5, f"More description: {news['more_description']}"
            )
            self.ln()
        if news["comments"]:
            self.multi_cell(0, 5, f"Comments: {news['comments']}")
            self.ln()
        if news["media_object"]:
            if self.is_list(news["media_object"]):
                self.cell(0, 5, "Media objects: ", ln=1)
                for media in news["media_object"]:
                    self.multi_cell(0, 5, f"{' ' * 5}{media}")
            else:
                self.multi_cell(0, 5, f"Media object: {news['media_object']}")
            self.ln()
        if news["extra_links"]:
            if self.is_list(news["extra_links"]):
                self.cell(0, 5, "Extra links: ", ln=1)
                for extra_link in news["extra_links"]:
                    self.multi_cell(0, 5, f"{' ' * 5}{extra_link}")
            else:
                self.multi_cell(0, 5, f"Extra link: {news['extra_links']}")
            self.ln()
        if news["source_feed"]:
            if self.is_list(news["source_feed"]):
                self.cell(0, 5, "Sources: ", ln=1)
                for source in news["source_feed"]:
                    self.multi_cell(0, 5, f"{' ' * 5}{source}")
            else:
                self.multi_cell(0, 5, f"Source: {news['source_feed']}")

    def body(self, data):
        """
        Render every feed header followed by its news items.
        :param data: a list of dictionaries with news
        """
        # A single feed arrives as a flat list of dicts; wrap it so the loop
        # below can always iterate over a list of feeds.
        if not self.is_list(data[0]):
            data = [data]
        for feed in data:
            # feed[0] holds the channel metadata, the rest are news items.
            self.cell(
                0, 5, f'Channel title: {feed[0]["channel_title"]}',
                align="C", ln=1
            )
            self.cell(0, 5, f'Source: {feed[0]["source"]}', align="C", ln=1)
            for news in feed[1:]:
                self.cell(0, 5, f"{'-' * 125}", align="C", ln=1)
                self._get_item(news)
                self.cell(0, 5, f"{'-' * 125}", align="C")
                self.ln()
            self.ln()

    def footer(self):
        """
        Render the page footer with the current page number.
        Called automatically by FPDF on every page.
        """
        self.set_y(-10)
        self.set_font("DejaVu", "", 15)
        self.cell(0, 5, f"Page {str(self.page_no())}", 0, 0, "C")

    @staticmethod
    def is_list(obj):
        """
        Check obj is list.
        :param obj: object
        :return: True or False
        """
        return isinstance(obj, list)

    @staticmethod
    def make_dir(path):
        """
        Creating a folder at the got path.
        If the folder already exists does nothing.
        :param path: the path where the folder should be created
        """
        if not Path(path).exists():
            Path(path).mkdir(parents=True)
def convertor_to_pdf(data, path, verbose):
    """
    Convert the collected feed data to PDF and save it.

    Loads the bundled DejaVu TTF font (required for non-Latin text),
    renders the document body from *data* and writes the result to
    *path* / FILE_NAME_PDF.
    :param data: a list of dictionaries with news
    :param path: the path to save the file
    :param verbose: verbose mode
    """
    pdf = PDF()
    pdf.alias_nb_pages()
    pdf.add_page()
    # The Unicode-capable font ships next to this module.
    path_to_ttf = Path(
        Path(__file__).parent, "files_for_pdf", "DejaVuSansCondensed.ttf"
    )
    pdf.add_font("DejaVu", "", path_to_ttf, uni=True)
    if verbose:
        info_print("Fonts have been received")
    pdf.set_font("DejaVu", "", 14)
    pdf.set_auto_page_break(True, 10)
    if verbose:
        info_print("PDF generation started")
    pdf.body(data)
    if verbose:
        info_print("PDF has been generated")
    try:
        pdf.make_dir(path)
        path = Path(path, FILE_NAME_PDF)
        pdf.output(path, "F")
        info_print(f"A feed in PDF format was saved on the path: {path}")
    except PermissionError:
        error_print(
            f"it is not possible to save a PDF file on this path: {path}."
            f" Such file already exists or cannot access the folder."
        )
    except Exception:
        # Broad on purpose: any other filesystem failure must be reported
        # instead of crashing the reader.
        error_print("The entered path cannot be created")
from pathlib import Path
from jinja2 import Environment, select_autoescape, FileSystemLoader
from cool_project.cervices.print_functions import error_print, info_print
from project_settings import FILE_NAME_HTML
def make_dir(path):
    """
    Creating a folder (including parents) at the got path.
    If the folder already exists does nothing.
    :param path: the path where the folder should be created
    """
    # exist_ok avoids the check-then-create race of a separate
    # exists()/mkdir() pair while keeping the same observable behavior.
    Path(path).mkdir(parents=True, exist_ok=True)
def is_list(obj):
    """
    Tell whether obj is a list (or a list subclass).
    :param obj: object to inspect
    :return: True or False
    """
    return isinstance(obj, list)
def get_env():
    """
    Build the jinja2 Environment for the HTML templates.
    Templates are loaded from the 'templates' folder next to this module.
    :return: env
    """
    templates_dir = Path(Path(__file__).parent, "templates")
    return Environment(
        loader=FileSystemLoader(templates_dir),
        autoescape=select_autoescape(["html"]),
        trim_blocks=True,
        lstrip_blocks=True,
    )
def get_content(data, env):
    """
    Prepare the template context and register custom jinja2 tests.
    :param data: news
    :param env: env
    :return: content
    """
    # A single feed arrives as a flat list; wrap it so the template can
    # always iterate over a list of feeds.
    if not is_list(data[0]):
        data = [data]
    env.tests["is_list"] = is_list
    return {"title": "Feeds", "feeds": data}
def convert_to_html(data, path, verbose):
    """
    Convert the collected feed data to HTML and save it.

    Initializes the jinja2 Environment, renders the HTML template with
    the news data and saves the result to *path* / FILE_NAME_HTML.
    :param data: a list of dictionaries with news
    :param path: the path to save the file
    :param verbose: verbose mode
    """
    if verbose:
        info_print("Conversion to HTML started")
    env = get_env()
    content = get_content(data, env)
    template = env.get_template("template.html")
    result = template.render(content)
    path = Path(path)
    if verbose:
        info_print("Conversion to HTML ended")
    try:
        make_dir(path)
        with open(Path(path, FILE_NAME_HTML), "w", encoding="utf-8") as file:
            file.write(result)
        info_print(
            f"A feed in HTML format was saved on the path: "
            f"{Path(path, FILE_NAME_HTML)}"
        )
    except Exception:
        # Any filesystem failure is reported instead of crashing the reader.
        error_print("The entered path cannot be created")
import shutil
from functools import wraps
from math import ceil

from requests import exceptions

from cool_project.cervices.print_functions import (
    info_print, warning_print, error_print
)
def check_limit_type_value(func):
    """
    Decorator which casts the parsed ``--limit`` value to int.
    If the value cannot be cast, a warning is printed and the original
    (string) value is left in place.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        try:
            # limit is optional, so None must pass through untouched.
            if result.limit is not None:
                result.limit = int(result.limit)
        except ValueError:
            warning_print("You must enter the number in --limit params.")
        return result
    return wrapper
def intercept_errors(func):
    """Decorator which intercepts request errors and reports them.

    On error the wrapped call returns None after printing a message.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except exceptions.ConnectionError:
            error_print("Connection error. Please check your URL")
        except exceptions.MissingSchema as exc:
            error_print(exc)
        except Exception:
            # Last-resort guard so an unexpected failure cannot crash the CLI.
            error_print("Unknown error")
    return wrapper
def verbose_information_about_start_scrapping(func):
    """
    Decorator which prints scraping start/stop messages in verbose mode.

    Expects the decorated callable's first positional argument to expose
    a boolean ``verbose`` attribute.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        if args[0].verbose:
            info_print("Start Scrapping")
            result = func(*args, **kwargs)
            info_print("Stop Scrapping")
        else:
            result = func(*args, **kwargs)
        return result
    return wrapper
def get_data_for_print_delimiter(word):
    """
    Compute the dash counts for a centered terminal separator line.
    :param word: a word which will be printed in center of a separator
    :return: left_columns_count, right_columns_count, word
    """
    total_columns = shutil.get_terminal_size().columns
    # None means "no label"; otherwise pad the label with spaces.
    word = "" if word is None else f" {word} "
    remaining = total_columns - len(word)
    # Odd remainders put the extra dash on the left side.
    left_columns_count = ceil(remaining / 2)
    right_columns_count = remaining - left_columns_count
    return left_columns_count, right_columns_count, word
def decorator_delimiter(start_word=None, end_word=None, calls_stat=False):
    """
    Decorator which print separate line with
    start_word and end_word value,
    if calls_stat is True also print how much times function was called,
    if start_word or end_word value is None value will not be print.
    :param start_word: a word which will be printed
    in center of a start separator
    :param end_word: a word which will be printed
    in center of a end separator
    :param calls_stat: print or not how much times function was called
    """
    # Call counter shared by every invocation of the decorated function
    # (captured by the closure below via ``nonlocal``).
    count_of_calls = 1
    def decorator(func):
        def wrapper(*args, **kwargs):
            nonlocal count_of_calls, start_word
            word = start_word
            if calls_stat:
                # Append the running call number to the header, e.g. "News 3".
                if start_word is not None:
                    word = f"{start_word} {count_of_calls}"
                count_of_calls += 1
            # Top separator, centered on the (possibly numbered) start word.
            left_columns_count, right_columns_count, word = get_data_for_print_delimiter(word)
            print(
                f"{'-' * left_columns_count}{word}{'-' * right_columns_count}"
            )
            result = func(*args, **kwargs)
            # Bottom separator, centered on the end word (or plain dashes).
            left_columns_count, right_columns_count, word = get_data_for_print_delimiter(end_word)
            print(
                f"{'-' * left_columns_count}{word}{'-' * right_columns_count}"
            )
            return result
        return wrapper
    return decorator
import json
from colorama import init, Fore, Style, deinit
from cool_project.cervices.decorators import decorator_delimiter
def console_output_feed(news, colorize):
    """
    Function which print news in console in standard format.
    :param colorize: the flag which shows that need paint output
    :param news: list of dicts which contains news information
    """
    print('\n') # line break for correct output
    # The first element of the list carries the channel-level metadata
    # ('source' and 'channel_title'); the remaining elements are news items.
    channel_data = news[0]
    if colorize:
        # Initialize colorama (autoreset=True resets the style after each
        # print; strip=False keeps the ANSI codes in the output stream).
        init(autoreset=True, strip=False)
        print(
            Style.BRIGHT + Fore.GREEN + f"Feed source:",
            f"{channel_data['source']}"
        )
        print(
            Style.BRIGHT + Fore.GREEN + f"Feed:",
            Fore.MAGENTA + f"{channel_data['channel_title']}" + Style.DIM,
            end="\n\n"
        )
    else:
        print(f"Feed source: {channel_data['source']}")
        print(f"Feed: {channel_data['channel_title']}", end="\n\n")
    # Print every news item; output_feed() draws its own delimiters.
    for item in news[1:]:
        output_feed(item, colorize)
        print() # line break for correct output
    if colorize:
        # Undo colorama's stdout wrapping.
        deinit()
@decorator_delimiter("News", calls_stat=True)
def output_feed(news, colorize):
    """
    Function which processing dictionary and if value is None
    don't print pair - key, value in console. If value contain text
    print pair - key, value which contain information about news.
    :param colorize: the flag which shows that need paint output
    :param news: dict which contain information about news
    """
    for key, value in news.items():
        # Empty fields are skipped, but description/comments still get
        # their blank line so spacing stays consistent.
        if value is None or value == []:
            line_break(key)
        else:
            # Lists (categories, links, ...) become one comma-joined string.
            if isinstance(value, list):
                value = rebuild_value(value)
            line_break(key)
            # NOTE(review): the replace(' ', ' ') calls below look like a
            # no-op here; presumably the first argument is a non-breaking
            # space being normalized to a regular space — verify in the
            # original file's encoding.
            if colorize:
                # key.title().replace('_', ' ') turns e.g. 'media_object'
                # into the display label 'Media Object'.
                if key == "description" or key == "more_description":
                    print(
                        Style.BRIGHT + Fore.GREEN +
                        f"{key.title().replace('_', ' ')}:",
                        f"{value.replace(' ', ' ')}"
                    )
                elif key == "title":
                    print(
                        Style.BRIGHT + Fore.RED +
                        f"{key.title().replace('_', ' ')}:",
                        Style.BRIGHT + Fore.YELLOW +
                        f"{value.replace(' ', ' ')}"
                    )
                else:
                    print(
                        Style.BRIGHT + Fore.CYAN +
                        f"{key.title().replace('_', ' ')}:",
                        f"{value.replace(' ', ' ')}"
                    )
            else:
                print(
                    f"{key.title().replace('_', ' ')}: "
                    f"{value.replace(' ', ' ')}"
                )
def line_break(key):
    """
    Print an empty line before the description and comments fields.
    :param key: key of dictionary
    """
    if key in ("description", "comments"):
        print()  # line break for correct output
def rebuild_value(value):
    """
    Join a list of values into a single comma-separated string.
    :param value: value which is a list
    :return: string
    """
    return ", ".join(value)
def console_json_output(data):
    """
    Serialize the news data to pretty-printed JSON and print it
    to the console.
    :param data: list of dicts which contains news information
    """
    print(json.dumps(data, indent=4, ensure_ascii=False))
import re
import urllib.parse
import asyncio
from torss.feed import Channel, Feed, Item
from torss.utils import fetch_bs, expect
async def fetch_urls(session):
    """Locate the 'world this week' article URLs on the weekly edition page.

    Returns a dict with any of the keys 'politics', 'business', 'kal'
    mapped to absolute article URLs; missing articles are omitted.
    """
    url = "https://www.economist.com/weeklyedition"
    soup = await fetch_bs(session, url)

    world_this_week = soup.find(re.compile(r"h\d"), string="The world this week")
    expect(world_this_week, "Couldn't get 'The world this week section'")
    section = world_this_week.find_parent("section")

    anchors = {
        "politics": section.find("a", string=re.compile(r"Politics.*")),
        "business": section.find("a", string=re.compile(r"Business.*")),
        # KAL's uses a real typographic apostrophe (KAL’s cartoon) so to be
        # safe, let's skip it entirely with regular expression
        "kal": section.find("a", string=re.compile(r"KAL.*cartoon")),
    }
    return {
        key: urllib.parse.urljoin(url, anchor["href"])
        for key, anchor in anchors.items()
        if anchor
    }
def lead_img_section_filter(tag):
    """Match a <section> holding exactly one figure with an image and no text."""
    if tag.name != "section":
        return False
    figures = tag.find_all("figure")
    if len(figures) != 1:
        return False
    if tag.find_all("p"):
        return False
    return any(child.name == "img" for child in figures[0].descendants)
def body_section_filter(tag):
    """Match a <section> that contains at least one paragraph."""
    if tag.name != "section":
        return False
    return len(tag.find_all("p")) > 0
def body_filter(tag):
    """Keep paragraphs, headings and images that belong to the article body.

    Drops the 'your browser does ...' fallback paragraph and the
    'Listen on the go' heading.
    """
    name = tag.name
    if name == "p":
        return "your browser does" not in tag.text.lower()
    if name in ("h1", "h2", "h3"):
        return tag.text != "Listen on the go"
    return name == "img"
async def parse_article(session, url):
    """Fetch *url* and return the article body as a newline-joined HTML string."""
    soup = await fetch_bs(session, url)
    main = soup.find("main", id="content")

    parts = []
    # The optional lead image lives in its own figure-only section.
    lead_section = main.find(lead_img_section_filter)
    if lead_section:
        parts.append(lead_section.find("img"))
    # The text body is the first section that actually has paragraphs.
    body = main.find(body_section_filter)
    parts.extend(body.find_all(body_filter))
    return "\n".join(str(part) for part in parts)
async def _weekly_article_feed(session, urls, key, title, filename):
    """Build a single-item feed for one 'world this week' article.

    Shared implementation for politics(), business() and kal(), which
    only differ in the URL key, the item title and the output file name.
    """
    expect(key in urls, f"URL for {title} not found")
    url = urls[key]
    ch = Channel(f"The Economist: {title}", "https://www.economist.com")
    ch.items.append(
        Item(
            title=title,
            link=url,
            content=await parse_article(session, url),
        )
    )
    return Feed(ch, filename)


async def politics(session, urls):
    """Feed for the 'Politics this week' article."""
    return await _weekly_article_feed(
        session, urls, "politics", "Politics this week", "politics.xml"
    )


async def business(session, urls):
    """Feed for the 'Business this week' article."""
    return await _weekly_article_feed(
        session, urls, "business", "Business this week", "business.xml"
    )


async def kal(session, urls):
    """Feed for the weekly KAL's cartoon."""
    return await _weekly_article_feed(
        session, urls, "kal", "KAL's cartoon", "kal.xml"
    )
async def run(session, **kw):
    """Entry point: scrape the index page, then build all three feeds concurrently."""
    urls = await fetch_urls(session)
    return await asyncio.gather(
        politics(session, urls), business(session, urls), kal(session, urls)
    )
# rssfixer
<!-- CODE:BASH:START -->
<!-- echo '[](https://github.com/marketplace/actions/super-linter)' -->
<!-- echo '' -->
<!-- echo '[](https://github.com/reuteras/rssfixer/actions?query=workflow%3ACodeQL)' -->
<!-- echo '[](https://github.com/reuteras/rssfixer/)' -->
<!-- if jq '.metrics._totals | ."SEVERITY.HI"' resources/bandit.json|grep -vE '^0' > /dev/null;then cl='red';elif jq '.metrics._totals' resources/bandit.json|grep "SEVERITY"|grep -E ' 0,'|wc -l|grep -vE '4$' > /dev/null;then cl='yellow';else cl='green';fi echo -n '[](https://github.com/PyCQA/bandit)' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
[](https://github.com/marketplace/actions/super-linter)

[](https://github.com/reuteras/rssfixer/actions?query=workflow%3ACodeQL)
[](https://github.com/reuteras/rssfixer/)
<!-- OUTPUT:END -->
A tool to generate an [RSS][rss] feed from some [WordPress][wor] blogs and other sources that for some reason don't generate their own feeds. This tool uses [BeautifulSoup][bso] to parse the HTML and [feedgen][fge] to generate the feed. I created this tool to be able to follow news from companies that have forgotten the usefulness of RSS.
## Installation
Create a virtual environment and simply run `python3 -m pip install rssfixer`, full example below.
```bash
python3 -m venv venv
source venv/bin/activate
python3 -m pip install rssfixer
```
## Supported page types
I've expanded the tool to cover blogs that I like to follow. If you have suggestions to add/change functionality please open an [issue][iss] or start a new [discussion][dis].
The basic formats of web pages supported are:
- `--list` - links are in simple ul-list
- `--json` - links, titles and sometimes descriptions are accessible in a JSON structure
- `--html` - links and titles can be found by some unique HTML element
- `--release` - similar to `--html` except there are no links and you have to specify a target URL
During testing it is useful to use the `--stdout` option to see the generated feed. When I have time (and enough motivation) I might write a tool to try and find the right combination of options for a specified URL.
### Simple list
An example to generate a feed for [nccgroup][ncc] that have the links in a simple ul-list by using the `--list` option:
```bash
$ rssfixer --title nccgroup --list https://research.nccgroup.com/
RSS feed created: rss_feed.xml
```
You can specify a filename and silence output:
```bash
rssfixer --title nccgroup --output nccgroup.xml --quiet https://research.nccgroup.com/
```
The resulting file is available [here][exa] as an example.
Most times you would run the script from crontab to have an updated feed. Here is an example with a venv in _/home/user/src/rssfixer_.
```bash
32 * * * * /home/user/src/rssfixer/bin/rssfixer --title nccgroup --output /var/www/html/feeds/nccgroup.xml --quiet --list https://research.nccgroup.com
```
### JSON
Some blogs like [truesec.com][tru] have all blog links in a JSON object. You can use the `--json` option to parse the JSON object and generate a feed. The same is true for Apple's [security blog][app] page.
An example for [Apple][app]:
```bash
rssfixer --title "Apple Security" --output apple.xml --quiet --json --json-entries blogs --json-url slug --base-url https://security.apple.com/blog/ https://security.apple.com/blog
```
In this example `--json-entries blogs` specifies that blog entries are located in a key called __blogs__ and that URLs are available in a key called __slug__. Since the URL only includes the key (or slug) we specify the full URL to the blog with `--base-url https://security.apple.com/blog/`.
An example for [truesec.com][tru]:
```bash
rssfixer --title Truesec --json --json-description preamble --quiet --output truesec.xml https://www.truesec.com/hub/blog
```
Here we must specify `--json-description preamble` to find the description or summary of the blog post.
### General HTML
Pages with a more general HTML structure can be parsed with the `--html` option. You can specify the HTML tag for the entries, the URL and title of the blog entry.
An example for [tripwire.com][tri]:
```bash
rssfixer --title Tripwire --output tripwire.xml --quiet --html --base-url https://www.tripwire.com http://www.tripwire.com/state-of-security
```
### Release
Check for one entity on release pages like [SQLite][sql] (h3) and generate RSS feed with links to the download page (required argument `--release-url`). Easy way to get notified when a new version is released.
```bash
rssfixer --release --output sqlite.xml --release-entries h3 --release-url https://sqlite.org/download.html https://sqlite.org/changes.html
```
### Usage
Command-line options (updated on commit by [markdown-code-runner][mcr]):
<!-- CODE:BASH:START -->
<!-- echo '```Text' -->
<!-- poetry run rssfixer --help -->
<!-- echo '```' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
```Text
usage: rssfixer [-h] (--html | --json | --list | --release) [--version]
[--atom] [--base-url BASE_URL] [--release-url RELEASE_URL]
[--release-entries RELEASE_ENTRIES]
[--html-entries HTML_ENTRIES]
[--html-entries-class HTML_ENTRIES_CLASS]
[--html-url HTML_URL] [--html-title HTML_TITLE]
[--html-title-class HTML_TITLE_CLASS]
[--title-filter TITLE_FILTER]
[--html-description HTML_DESCRIPTION]
[--html-description-class HTML_DESCRIPTION_CLASS]
[--json-entries JSON_ENTRIES] [--json-url JSON_URL]
[--json-title JSON_TITLE]
[--json-description JSON_DESCRIPTION] [--output OUTPUT]
[--title TITLE] [--user-agent USER_AGENT]
[--filter-type FILTER_TYPE] [--filter-name FILTER_NAME] [-q]
[-d] [--stdout]
url
Generate RSS feed for blog that don't publish a feed. Default is to find links
in a simple <ul>-list. Options are available to find links in other HTML
elements or JSON strings.
positional arguments:
url URL for the blog
options:
-h, --help show this help message and exit
--html Find entries in HTML
--json Find entries in JSON
--list Find entries in HTML <ul>-list (default)
--release Find releases in HTML
--version show program's version number and exit
--atom Generate Atom feed
--base-url BASE_URL Base URL for the blog
--release-url RELEASE_URL
Release URL for downloads
--release-entries RELEASE_ENTRIES
Release selector for entries
--html-entries HTML_ENTRIES
HTML selector for entries
--html-entries-class HTML_ENTRIES_CLASS
Class name for entries
--html-url HTML_URL HTML selector for URL
--html-title HTML_TITLE
HTML selector for title
--html-title-class HTML_TITLE_CLASS
Flag to specify title class (regex)
--title-filter TITLE_FILTER
Filter for title, ignore entries that don't match
--html-description HTML_DESCRIPTION
HTML selector for description
--html-description-class HTML_DESCRIPTION_CLASS
Flag to specify description class (regex)
--json-entries JSON_ENTRIES
JSON key for entries (default: 'entries')
--json-url JSON_URL JSON key for URL (default: 'url')
--json-title JSON_TITLE
JSON key for title
--json-description JSON_DESCRIPTION
JSON key for description
--output OUTPUT Name of the output file
--title TITLE Title of the RSS feed (default: "My RSS Feed")
--user-agent USER_AGENT
User agent to use for HTTP requests
--filter-type FILTER_TYPE
Filter web page
--filter-name FILTER_NAME
Filter web page
-q, --quiet Suppress output
-d, --debug Debug selection
--stdout Print to stdout
```
<!-- OUTPUT:END -->
## Command-line examples for blogs
```bash
# Apple Security Blog
# Url: https://security.apple.com/blog/
rssfixer --title "Apple Security" --output apple.xml --quiet --json --json-entries blogs --json-url slug --base-url https://security.apple.com/blog/ https://security.apple.com/blog
# nccgroup
# Url: https://research.nccgroup.com/
rssfixer --title nccgroup --output nccgroup.xml --quiet --list https://research.nccgroup.com
# Tripwire
# Url: https://www.tripwire.com/state-of-security
rssfixer --title Tripwire --output tripwire.xml --quiet --html --base-url https://www.tripwire.com http://www.tripwire.com/state-of-security
# TrueSec
# Url: https://www.truesec.com/hub/blog
rssfixer --title Truesec --output truesec.xml --quiet --json --json-description preamble https://www.truesec.com/hub/blog
# SQLite
# Url: https://sqlite.org/changes.html
rssfixer --title SQLite --release --release-entries h3 --release-url https://sqlite.org/download.html https://sqlite.org/changes.html
# Nucleus
# https://nucleussec.com/category/cisa-kev
rssfixer --title "Nucleus CISA KEV" --output nucleus.xml --html --filter-type div --filter-name recent-post-widget --html-entries div --html-title div --html-title-class "post-desc" --title-filter KEV https://nucleussec.com/category/cisa-kev
# NCSC-SE
# https://www.ncsc.se/publikationer/
rssfixer --html --filter-type div --filter-name 'page-container' --html-entries div --html-entries-class "news-text" --html-title h2 --html-title-class "" --html-description p --html-description-class "" --html-url a --base-url https://www.ncsc.se --stdout --atom --title "Feed for NCSC-SE" https://www.ncsc.se/publikationer/
```
If you have other example use case please add them in [show usage examples][sue] in discussions.
[app]: https://security.apple.com/blog/
[bso]: https://www.crummy.com/software/BeautifulSoup/
[dis]: https://github.com/reuteras/rssfixer/discussions
[exa]: https://github.com/reuteras/rssfixer/blob/main/src/tests/data/output/nccgroup.xml
[fge]: https://feedgen.kiesow.be/
[iss]: https://github.com/reuteras/rssfixer/issues
[mcr]: https://github.com/basnijholt/markdown-code-runner
[ncc]: https://research.nccgroup.com/
[rss]: https://www.rssboard.org/
[sql]: https://sqlite.org/changes.html
[sue]: https://github.com/reuteras/rssfixer/discussions/categories/show-usage-examples
[tri]: https://www.tripwire.com/state-of-security
[tru]: https://www.truesec.com/hub/blog
[wor]: https://wordpress.org/
| /rssfixer-0.2.1.tar.gz/rssfixer-0.2.1/README.md | 0.550849 | 0.691823 | README.md | pypi |
import asyncio as aio
import traceback
from abc import ABC, abstractmethod
from typing import List, Tuple
import codefast as cf
import jieba
from codefast.exception import get_exception_str
from rss.base.bm25 import BM25
from rss.base.sif import sif_embeddings, top_k_similar_sentences, word2vec
from rss.base.todb import insert_many, load_all
from rss.base.types import Article
from rss.core.tg import tcp
from rss.data.db import db as rssdb
def get_exception_str(e: Exception) -> str:
    """Return the exception message followed by the current traceback text.

    NOTE(review): this shadows the get_exception_str imported from
    codefast.exception above -- presumably intentional; confirm.
    """
    return f"{e}\n{traceback.format_exc()}"
class Component(ABC):
    """A single pipeline stage: implement process(); call exec() to run it."""

    @abstractmethod
    def process(self, *args, **kwargs):
        """Do the stage's work; subclasses must override."""

    def exec(self, *args, **kwargs):
        """Run process() with start/finish logging; return its result."""
        cls_name = self.__class__.__name__
        module_name = self.__class__.__module__.split('.')[-1]
        qualified = module_name + "." + cls_name
        cf.info('pipeline starts exec [{}], args {}, kwargs {}'.format(
            qualified, args, kwargs))
        results = self.process(*args, **kwargs)
        cf.info('pipeline finish exec [{}], results: {}'.format(
            cls_name, results))
        return results
class Pipeline(object):
    """Chains Components, feeding each stage's output into the next."""

    def __init__(self) -> None:
        self.components = []
        self.source_input = None

    def add(self, component: Component):
        """Append a stage; returns self so calls can be chained."""
        self.components.append(component)
        return self

    def set_source_input(self, source_input):
        """Set the value handed to the first stage; returns self."""
        self.source_input = source_input
        return self

    def process(self):
        """Run every stage in order; on error, log it and notify Telegram."""
        results = self.source_input
        try:
            for component in self.components:
                if results is None:
                    results = component.exec()
                else:
                    results = component.exec(results)
        except Exception as e:
            cf.error(get_exception_str(e))
            tcp.post(get_exception_str(e))
        return results
class PostToTelegram(Component):
    """Posts each article to Telegram, returning those delivered successfully."""

    def process(self, articles: List[Article]) -> List[Article]:
        delivered = []
        for article in articles:
            try:
                cf.info('posting new article: {}'.format(article))
                tcp.post(article.telegram_format())
            except Exception as e:
                text = get_exception_str(e)
                cf.error(text)
                tcp.post(text)
            else:
                delivered.append(article)
        return delivered
class SaveToDB(Component):
    """Persists articles keyed by 'rss_title_<title>' via async insert_many."""

    def process(self, articles: List[Article]) -> List[Article]:
        if not articles:
            return articles
        payload = {
            'rss_title_' + str(article.title): str(article)
            for article in articles
        }
        try:
            loop = aio.get_event_loop()
        except RuntimeError:
            # No event loop in this thread yet: create and install one.
            loop = aio.new_event_loop()
            aio.set_event_loop(loop)
        loop.run_until_complete(insert_many(payload))
        return articles
class MarkPostedArticlesToDB(Component):
    """Flags every article's uid in the dedup database as already posted."""

    def process(self, articles: List[Article]) -> List[Article]:
        for article in articles:
            rssdb.set(article.uid, 1)
        return articles
class FilterPosted(Component):
    """Drops articles whose uid is already recorded in the dedup database."""

    def process(self, articles: List[Article]) -> List[Article]:
        fresh = []
        for article in articles:
            if rssdb.exists(article.uid):
                continue
            cf.info('found new article: {}'.format(article))
            fresh.append(article)
        return fresh
def get_posted(update=False):
    """Return the posted-articles map, optionally refreshing it from the DB.

    When *update* is False the cached posted.json file is returned as-is;
    otherwise all rows are loaded asynchronously, re-cached to posted.json,
    and returned as a {key: value} dict.
    """
    local = 'posted.json'
    if not update:
        return cf.js(local)
    try:
        loop = aio.get_event_loop()
    except RuntimeError:
        loop = aio.new_event_loop()
        aio.set_event_loop(loop)
    rows = loop.run_until_complete(load_all())
    posted = {row[1]: row[2] for row in rows}
    cf.js.write(posted, local)
    return posted
class FilterPostedBM25(Component):
    """Drops articles whose title near-duplicates an already-posted one.

    Despite the name, similarity is computed with SIF sentence embeddings
    over a word2vec model trained on previously posted titles, not BM25.
    """

    def __get_corpus(self) -> List[str]:
        """Load posted titles (refreshing from the DB) as the corpus.

        get_posted(update=True) already caches posted.json itself, so the
        redundant extra cf.js.write() call was removed here.
        """
        js = get_posted(update=True)
        return [key.replace('rss_title_', '') for key in js.keys()]

    def __is_article_posted(self, title: str, model, corpus_embedding,
                            corpus) -> bool:
        """True when the most similar posted title scores above 0.8."""
        top1 = top_k_similar_sentences(title,
                                       corpus,
                                       model=model,
                                       k=1,
                                       candidate_embedding=corpus_embedding)
        cf.info(top1)
        score = top1[0][0]
        return score > 0.8

    def __get_model(self):
        """Train word2vec on the corpus and precompute its SIF embeddings."""
        corpus = self.__get_corpus()
        model = word2vec(corpus)
        corpus_embedding = sif_embeddings(corpus, model)
        return model, corpus_embedding, corpus

    def process(self, articles: List[Article]) -> List[Article]:
        model, corpus_embedding, corpus = self.__get_model()
        return [
            a for a in articles if not self.__is_article_posted(
                a.title, model, corpus_embedding, corpus)
        ]
import math
from collections import Counter
from typing import Any, Dict, List, Optional, Set, Tuple, Union
class BM25(object):
    """
    Best Match 25 ranking function.

    Parameters
    ----------
    k1 : float, default 1.5
        Term-frequency saturation parameter.
    b : float, default 0.75
        Document-length normalization strength (0 = none, 1 = full).

    Attributes
    ----------
    tf_ : list[dict[str, int]]
        Term Frequency per document. So [{'hi': 1}] means
        the first document contains the term 'hi' 1 time.
    df_ : dict[str, int]
        Document Frequency per term. i.e. Number of documents in the
        corpus that contains the term.
    idf_ : dict[str, float]
        Inverse Document Frequency per term.
    doc_len_ : list[int]
        Number of terms per document. So [3] means the first
        document contains 3 terms.
    corpus_ : list[list[str]]
        The input corpus.
    corpus_size_ : int
        Number of documents in the corpus.
    avg_doc_len_ : float
        Average number of terms for documents in the corpus.
    """

    def __init__(self, k1=1.5, b=0.75):
        self.b = b
        self.k1 = k1

    def fit(self, corpus):
        """
        Fit the statistics required to calculate the BM25 ranking
        score using the corpus given.

        Parameters
        ----------
        corpus : list[list[str]]
            Each element in the list represents a document, and each document
            is a list of the terms.

        Returns
        -------
        self

        Raises
        ------
        ZeroDivisionError
            If *corpus* is empty (average document length is undefined).
        """
        tf = []
        df = {}
        doc_len = []
        for document in corpus:
            doc_len.append(len(document))
            # Term frequency for this document, counted in one pass.
            frequencies = dict(Counter(document))
            tf.append(frequencies)
            # Each distinct term in the document bumps its document frequency.
            for term in frequencies:
                df[term] = df.get(term, 0) + 1
        corpus_size = len(doc_len)
        # Smoothed idf (+1 inside the log keeps values positive).
        idf = {
            term: math.log(1 + (corpus_size - freq + 0.5) / (freq + 0.5))
            for term, freq in df.items()
        }
        self.tf_ = tf
        self.df_ = df
        self.idf_ = idf
        self.doc_len_ = doc_len
        self.corpus_ = corpus
        self.corpus_size_ = corpus_size
        self.avg_doc_len_ = sum(doc_len) / corpus_size
        return self

    def search(self, query: List[str]) -> List[float]:
        """Return the BM25 score of *query* against every fitted document."""
        return [self._score(query, index) for index in range(self.corpus_size_)]

    def _score(self, query: List[str], index: int) -> float:
        """Score *query* against the document at *index*.

        Terms absent from the document contribute nothing; each present
        term adds its idf weighted by a saturated, length-normalized tf.
        """
        score = 0.0
        doc_len = self.doc_len_[index]
        frequencies = self.tf_[index]
        for term in query:
            if term not in frequencies:
                continue
            freq = frequencies[term]
            numerator = self.idf_[term] * freq * (self.k1 + 1)
            denominator = freq + self.k1 * (
                1 - self.b + self.b * doc_len / self.avg_doc_len_)
            score += numerator / denominator
        return score

    def get_top_n(self, query: List[str], n: int = 5) -> List[Tuple[float, List[str]]]:
        """Return the *n* highest-scoring (score, document) pairs for *query*.

        Note: documents are returned as their token lists (the fitted
        corpus entries); the previous ``str`` annotation was inaccurate.
        """
        scores = self.search(query)
        pairs = sorted(zip(scores, self.corpus_), key=lambda p: p[0], reverse=True)
        return pairs[:n]
if __name__ == '__main__':
    # Small demo corpus to exercise the ranker.
    corpus = [
        'Human machine interface for lab abc computer applications',
        'A survey of user opinion of computer system response time',
        'The EPS user interface management system',
        'System and human system engineering testing of EPS',
        'Relation of user perceived response time to error measurement',
        'The generation of random binary unordered trees',
        'The intersection graph of paths in trees',
        'Graph minors IV Widths of trees and well quasi ordering',
        'Graph minors A survey'
    ]
    # Lowercase, split, and drop stop words (real preprocessing would do more).
    stopwords = set(['for', 'a', 'of', 'the', 'and', 'to', 'in'])
    texts = []
    for document in corpus:
        texts.append([
            word for word in document.lower().split() if word not in stopwords
        ])
    # Count occurrences across all documents so one-off words can be dropped.
    word_count_dict = {}
    for text in texts:
        for token in text:
            word_count_dict[token] = word_count_dict.get(token, 0) + 1
    texts = [[token for token in text if word_count_dict[token] > 1]
             for text in texts]
    print(texts)
    # Rank every document in the corpus against a sample query.
    query = [
        word for word in 'The intersection of graph survey and trees'.lower().split()
        if word not in stopwords
    ]
    bm25 = BM25()
    bm25.fit(texts)
    for score, doc in zip(bm25.search(query), corpus):
        print(str(round(score, 3)) + '\t' + doc)
from typing import Any, List
from rich import print
import codefast as cf
import jieba
import numpy as np
from gensim.models import Word2Vec
def word2vec(texts: List[str]) -> Word2Vec:
    """Train a Word2Vec model on *texts*.

    Each text is tokenized with jieba and single-character tokens are
    discarded before training.
    """
    tokenized = []
    for text in texts:
        tokenized.append([tok for tok in jieba.lcut(text) if len(tok) > 1])
    return Word2Vec(tokenized, window=5, min_count=1, workers=4)
def sif_embeddings(sentences: List[str], model: Word2Vec, alpha: float = 1e-3):
    """Compute the SIF embeddings for a list of sentences.

    Parameters
    ----------
    sentences : list
        The sentences to compute the embeddings for.
    model : `~gensim.models.base_any2vec.BaseAny2VecModel`
        A gensim model that contains the word vectors and the vocabulary.
    alpha : float, optional
        Parameter which is used to weigh each individual word based on its
        probability p(w).

    Returns
    -------
    numpy.ndarray
        SIF sentence embedding matrix of dim len(sentences) * dimension.
    """
    REAL = np.float32
    vlookup = model.wv.key_to_index  # word -> index (the vocabulary)
    vectors = model.wv  # word vector access
    size = model.vector_size  # embedding dimensionality
    # Total token count over the vocabulary, used to estimate p(w).
    Z = sum(vectors.get_vecattr(k, "count") for k in vlookup)
    output = []
    for sentence in sentences:
        tokens = [t for t in jieba.lcut(sentence) if t]
        summary = np.zeros(size, dtype=REAL)
        for word in tokens:
            if word in vlookup:
                # SIF weight a / (a + p(w)) down-weights frequent words.
                weight = alpha / (alpha + vectors.get_vecattr(word, 'count') / Z)
                summary += weight * vectors[word]
        # NOTE(review): the classic SIF formulation divides the sum by the
        # number of in-vocabulary tokens; the original code tracked such a
        # counter but never used it (dead variable, now removed). Behavior
        # is preserved as a raw weighted sum -- confirm whether averaging
        # was intended.
        output.append(summary)
    return np.vstack(output).astype(REAL)
def top_k_similar_sentences(sentence: str, corpus: List[str], model: Word2Vec, k: int = 10, candidate_embedding=None) -> List[str]:
    """Return the top *k* corpus sentences most similar to *sentence*.

    Similarity is cosine similarity between SIF embeddings. Pass a
    precomputed *candidate_embedding* to avoid re-embedding the corpus.
    The result is a list of (cosine_similarity, sentence) pairs, best first.
    """
    query_vec = sif_embeddings([sentence], model)[0]
    if candidate_embedding is None:
        candidate_embedding = sif_embeddings(corpus, model)
    assert len(corpus) == len(candidate_embedding), 'corpus and candidate_embedding must have the same length'
    norms = np.linalg.norm(candidate_embedding, axis=1) * np.linalg.norm(query_vec)
    cos_sim = np.dot(candidate_embedding, query_vec) / norms
    best = np.argsort(cos_sim)[::-1][:k]
    return [(cos_sim[i], corpus[i]) for i in best]
# (feed_url, human-readable source name) pairs polled by the RSS pipeline.
# Entries commented out below are kept for reference and are not polled.
RSS_URLS = [
    ('https://openai.com/blog/rss/', 'openai'),
    ('https://www.aitrends.com/feed/', 'aitrends'),
    ('https://venturebeat.com/category/ai/feed/', 'venturebeat'),
    ('https://www.wired.com/category/business/feed/', 'wired'),
    ('https://wanqu.co/feed/', '湾区日报'),
    ('https://feed.cnblogs.com/blog/u/409312/rss/', 'cnblogs-风雨中的小七'),
    ('https://rsshub.app/infoq/recommend', 'infoq'),
    ('https://sspai.com/feed', 'sspai'),
    ('https://rsshub.app/geektime/column/48', 'geektime'),
    ('https://rsshub.app/juejin/category/ai', '掘金AI'),
    ('https://rsshub.app/meituan/tech/home', '美团技术'),
    ('https://rsshub.app/blogs/paulgraham', 'PaulGraham'),
    ('https://www.msra.cn/feed', 'MSRADeepLearningArticles'),
    ('http://ai.stanford.edu/blog/feed.xml', 'standford nlp'),
    ('https://nullprogram.com/feed/', 'endless_author'),
    # ('https://kexue.fm/feed', '科学空间'),
    # ('https://stackoverflow.com/feeds/tag/nlp', 'StackoverFlowNLP'),
    # ('https://stackoverflow.com/feeds/tag/keras', 'StackOverFlowKeras'),
    # ('http://www.dydhhy.com/rsslatest.xml', '电影后花园'),
    # ('https://techcrunch.com/feed/', 'techcrunch'),
    # ('https://www.theverge.com/rss/frontpage', 'theverge'),
    # ('https://www.wired.com/feed/rss', 'wired'),
    ('https://rsshub.app/nasa/apod', 'NASADailyPicture'),
    ('https://sspai.com/feed', '少数派'),
    ('https://machinelearningmastery.com/feed/', 'mlmastery'),
    # ('https://nitter.it/seanwei001/rss', 'Twitter-seanwei001'),
    ('https://bird.trom.tf/Norathen/rss', 'Twitter-Norathen'),
    # ('https://bird.trom.tf/StephenKing/rss', 'Twitter-stephen-king'),
    ('https://bird.trom.tf/stanfordnlp/rss', 'Twitter-StanfordNLP'),
    # ('https://nitter.it/binanbijo_sekai/rss', 'Twitter-sekai'),
    # The werss.weapp.design feeds below mirror WeChat public accounts.
    ('https://cdn.werss.weapp.design/api/v1/feeds/dbea39f3-7e43-4df9-9f86-e78f4690ee10.xml', 'geekpark'),
    ('https://cdn.werss.weapp.design/api/v1/feeds/e7651950-5b24-4487-9995-b0b3468ea392.xml', 'almosthuman'),
    ('https://cdn.werss.weapp.design/api/v1/feeds/f63ef4b7-c6c4-4a94-8652-62a228a759c8.xml', 'ai_front'),
    ('https://cdn.werss.weapp.design/api/v1/feeds/e83b0f63-5c84-45f6-a44e-a5e1b1ea87f9.xml', 'paperweekly'),
    ('https://cdn.werss.weapp.design/api/v1/feeds/04db1eb7-f7b8-417d-8f42-5119feeb443e.xml', 'infoQ'),
    ('https://cdn.werss.weapp.design/api/v1/feeds/9e44c20a-b61b-44c6-9afd-800dd09a7508.xml', 'AIstuff'),
    ('https://cdn.werss.weapp.design/api/v1/feeds/1cfea1ef-3df6-47b1-9fd4-7f82ca026dd0.xml', 'TencentTech'),
    ('https://cdn.werss.weapp.design/api/v1/feeds/1c52f62d-1c3f-4fd5-9ffb-142253470fb9.xml', 'AINLP'),
    ('https://cdn.werss.weapp.design/api/v1/feeds/7ed2ef11-519c-4ea9-901b-6cf5cff952da.xml', '夕小瑶')
]
from rss.auth import auth
# WeChat public-account scrape targets: each entry names the scrape URL and
# the display name used for the article 'source' field.
WECHAT_PUBLIC = {
    'yuntoutiao': {
        'main_url': auth.werss_yuntoutiao,
        'source': '云头条',
        'sub_type': 'yuntoutiao',
    },
    'huxiu': {
        'main_url': 'https://www.wxkol.com/show/huxiu_com.html',
        'source': '虎嗅网',
        'sub_type': 'huxiu',
    },
}
# Telegram bot and channel used for publishing scraped items.
# (Dataset metadata that had been fused onto this line, breaking the module,
# has been removed.)
TELEGRAM = {'bot_name': 'hema_bot', 'channel_name': 'global_news_podcast'}
""" 资源搬运工
"""
from abc import ABC, abstractmethod
from typing import Any, List
import codefast as cf
import feedparser
from bs4 import BeautifulSoup
from pydantic import BaseModel
from rss.base.pipeline import Component, Pipeline
from rss.core.tg import tcp
from rss.data.db import db as rssdb
from rss.utils import get_exception_str
class FeedBody(BaseModel):
    """One parsed RSS entry; url/text are filled in after summary parsing."""
    title: str
    link: str
    summary: str
    url: str = None  # extracted later from the last <a> in the summary
    text: str = None  # cleaned first paragraph of the summary

    def __str__(self) -> str:
        # Rendered as the cleaned text, a blank line, then the link.
        return "{}\n\n{}".format(self.text, self.url)
class ContentRetriver(ABC):
    """Abstract source of content strings fetched from a single URL.

    (Name kept as-is for compatibility; 'Retriever' is the usual spelling.)
    """

    def __init__(self, url: str):
        super().__init__()
        self.url = url

    @abstractmethod
    def run(self) -> List[str]:
        """Return the list of content strings extracted from self.url."""
        pass
class TelegramChannelRetriver(ContentRetriver):
    """Extracts post summaries from a Telegram-channel RSS feed.

    For every entry it pulls the last link out of the HTML summary and a
    cleaned first paragraph, keeping only entries where both are present.
    """

    def run(self) -> List[str]:
        feeds = feedparser.parse(self.url)
        contents = []
        for feed in feeds.entries:
            feed = FeedBody(**feed)
            # Skip the daily consumer-electronics digest posts.
            if '每日消费电子观' in feed.summary:
                continue
            summary = BeautifulSoup(feed.summary, 'html.parser')
            try:
                # The last <a> in the summary is taken as the article link.
                feed.url = summary.find_all('a').pop().get('href')
                # First paragraph, stripped of separators and trailing links.
                feed.text = summary.find_all('p')[0].text
                feed.text = feed.text.split('==')[0]
                feed.text = feed.text.split(' | ')[0]
                feed.text = feed.text.split(' - ')[0]
                feed.text = feed.text.split("http")[0]
                if not feed.text.endswith('。'):
                    feed.text += '。'
            except Exception:
                # Best-effort parsing: malformed entries simply fail the
                # url/text check below and are skipped. Narrowed from a bare
                # `except:` so KeyboardInterrupt/SystemExit still propagate.
                pass
            if feed.url and feed.text:
                contents.append(str(feed))
        return contents
class GetContent(Component):
    """Pipeline stage that delegates fetching to a ContentRetriver."""

    def process(self, retriver: ContentRetriver) -> List[str]:
        contents = retriver.run()
        return contents
class FilterEmpty(Component):
    """Drops falsy (empty) content strings."""

    def process(self, contents: List[str]) -> List[str]:
        return [content for content in contents if content]
class FilterPosted(Component):
    """Drops contents whose md5 is already recorded in the dedup database."""

    def process(self, contents: List[str]) -> List[str]:
        return [c for c in contents if not rssdb.exists(cf.md5sum(c))]
class FilterIthome(Component):
    """Drops ithome articles: their links get no preview card on Twitter."""

    def process(self, contents: List[str]) -> List[str]:
        return [c for c in contents if 'ithome' not in c]
from rss.auth import auth
class PostToTwitter(Component):
    """Posts each content string to Twitter via the configured webhook.

    Returns the subset of contents that were posted successfully; failures
    are reported to Telegram.
    """

    def process(self, contents: List[str]) -> List[str]:
        success_posts = []
        for content in contents:
            try:
                cf.net.post(auth.cf_twitter_url, content)
                cf.info('posting a tweet: {}'.format(content))
                success_posts.append(content)
            except Exception as e:
                # NOTE(review): failed posts are marked as posted here, so
                # they are never retried -- presumably to avoid repeating a
                # failure on the same bad content; confirm this is intended.
                rssdb.set(cf.md5sum(content), 1)
                exception_str = f'{content}\n{get_exception_str(e)}'
                cf.error(exception_str)
                tcp.post(exception_str)
        return success_posts
class CacheSuccessPosts(Component):
    """Records the md5 of each posted content in the dedup database."""

    def process(self, contents: List[str]) -> List[str]:
        for content in contents:
            rssdb.set(cf.md5sum(content), 1)
        return contents
def media_port():
    """Cross-post new items from the CE_Observe Telegram channel to Twitter."""
    retriver = TelegramChannelRetriver(
        'https://rsshub.app/telegram/channel/CE_Observe')
    pipeline = (Pipeline()
                .set_source_input(retriver)
                .add(GetContent())
                .add(FilterIthome())
                .add(FilterEmpty())
                .add(FilterPosted())
                .add(PostToTwitter())
                .add(CacheSuccessPosts()))
    pipeline.process()


if __name__ == "__main__":
    media_port()
""" rss feed
"""
import traceback
from typing import List, Tuple
import codefast as cf
import feedparser
from rss.base.anynews import Article
from rss.base.pipeline import Component, Pipeline, PostToTelegram, FilterPosted, MarkPostedArticlesToDB, SaveToDB, FilterPostedBM25
from rss.core.tg import tcp
from rss.data import RSS_URLS
def get_exception_str(e: Exception) -> str:
    """Return the exception message followed by the current traceback text."""
    return f"{e}\n{traceback.format_exc()}"
class GetRssSources(Component):
    """Supplies the list of (feed_url, source_name) pairs to crawl.

    Remote configuration is intentionally disabled: the previous
    implementation raised immediately ("use build in rss urls"), which made
    the remote-fetch code below it unreachable dead code. That dead code has
    been removed; the built-in RSS_URLS list is always returned.
    """

    def process(self, sources_url: str):
        # *sources_url* is accepted for interface compatibility but unused
        # while remote fetching stays disabled.
        return RSS_URLS
class GetArticles(Component):
    """Parses every RSS feed and converts its entries into Article objects.

    Feeds that fail to parse are reported to Telegram and skipped.
    """

    def process(self, rss_urls: List[Tuple[str, str]]) -> List[Article]:
        def field(entry, key):
            """Entry value for *key*, or the 'empty_<key>' placeholder."""
            return entry[key] if key in entry else 'empty_' + key

        articles = []
        for url, source in rss_urls:
            cf.info('parsing rss url: {}, source: {}'.format(url, source))
            try:
                for entry in feedparser.parse(url).entries:
                    articles.append(
                        Article(uid=field(entry, 'id'),
                                title=field(entry, 'title'),
                                url=field(entry, 'link'),
                                source=source,
                                author=field(entry, 'author'),
                                date=field(entry, 'published')))
            except Exception as e:
                exception_str = f'{url}\n{get_exception_str(e)}'
                cf.error(exception_str)
                tcp.post(exception_str)
        return articles
class FilterSpam(Component):
    """Removes articles whose titles contain known spam phrases."""

    _SPAM_MARKERS = ('满足条件每人补贴8000元',)

    def _is_spam(self, title: str) -> bool:
        lowered = title.lower()
        return any(marker in lowered for marker in self._SPAM_MARKERS)

    def process(self, articles: List[Article]) -> List[Article]:
        return [article for article in articles if not self._is_spam(article.title)]
def rsshub_pipe():
    """Crawl all feeds, drop spam/duplicates, persist and post new articles."""
    pipeline = (Pipeline()
                .set_source_input('https://host.ddot.cc/rssurls')
                .add(GetRssSources())
                .add(GetArticles())
                .add(FilterSpam())
                .add(FilterPosted())
                .add(FilterPostedBM25())
                .add(SaveToDB())
                .add(PostToTelegram())
                .add(MarkPostedArticlesToDB()))
    pipeline.process()


if __name__ == '__main__':
    rsshub_pipe()
""" virmatch flash sale monitor
"""
import os
import time
from abc import ABC, abstractmethod
from typing import List
import codefast as cf
import feedparser
from codefast.patterns.pipeline import Component, Pipeline
from pydantic import BaseModel
from rss.auth import auth
from rss.core.tg import tcp
from rss.data.db import db as rssdb
from rss.utils import get_exception_str
class FeedBody(BaseModel):
    """One RSS entry; str() renders the title and summary on two lines."""
    title: str
    link: str
    summary: str

    def __str__(self) -> str:
        return "{}\n{}".format(self.title, self.summary)
class ContentRetriver(ABC):
    """Abstract source of content strings fetched from a single URL.

    (Name kept as-is for compatibility; 'Retriever' is the usual spelling.)
    """

    def __init__(self, url: str):
        super().__init__()
        self.url = url

    @abstractmethod
    def run(self) -> List[str]:
        """Return the list of content strings extracted from self.url."""
        pass
class GetContent(Component):
    """Fetches the feed at *url* and returns every entry title."""

    def process(self, url: str) -> List[str]:
        parsed = feedparser.parse(url)
        return [FeedBody(**entry).title for entry in parsed.entries]
class GetNewestContent(Component):
    """Variant of GetContent that hits a search URL for fresher entries.

    The plain channel feed lags behind; querying the search endpoint with a
    fixed value returns newer entries.
    """

    def process(self, url: str) -> List[str]:
        lb, hb = 1200, 10000
        mid = (lb + hb) // 2  # always 5600; the bounds document the range
        query_url = f'{url}/searchQuery={mid}'
        parsed = feedparser.parse(query_url)
        return [FeedBody(**entry).title for entry in parsed.entries]
class FilterEmpty(Component):
    """Drops falsy (empty) content strings."""

    def process(self, contents: List[str]) -> List[str]:
        return [content for content in contents if content]
class FilterPosted(Component):
    """Drops contents whose md5 is already recorded in the dedup database."""

    def process(self, contents: List[str]) -> List[str]:
        return [c for c in contents if not rssdb.exists(cf.md5sum(c))]
class FilterHighQuality(Component):
    """Keeps only interesting entries and escapes awkward characters."""

    def format(self, c: str) -> str:
        """Escape '&' as %26 and strip '#' characters."""
        return c.replace('&', "%26").replace('#', "")

    def interest(self, c: str) -> bool:
        """True when the entry mentions any watched keyword."""
        lowered = c.lower()
        return any(key in lowered for key in ['pikpak'])

    def process(self, contents: List[str]) -> List[str]:
        return [self.format(c) for c in contents if self.interest(c)]
class PostToTelegram(Component):
    """Sends every content string to the Telegram channel via the bot API."""

    def process(self, contents: List[str]) -> List[str]:
        for c in contents:
            payload = {'channel': auth.tg_channel, 'message': c}
            cf.net.post(auth.telegram_api, json=payload)
        return contents
class PopWindowsMessage(Component):
    """Shows each content string as a desktop notification via notify-send."""

    def process(self, contents: List[str]) -> List[str]:
        import subprocess  # local import keeps module-level deps unchanged

        for c in contents:
            # Pass arguments as a list (shell=False) so feed-derived text
            # cannot inject shell commands -- the previous
            # os.system(f'notify-send "VirFlash" "{c}"') interpolated
            # untrusted text straight into a shell command line.
            subprocess.run(['notify-send', 'VirFlash', c], check=False)
        return contents
class CacheSuccessPosts(Component):
    """Marks each content as seen for one week in the dedup database."""

    _WEEK_SECONDS = 60 * 60 * 24 * 7

    def process(self, contents: List[str]) -> List[str]:
        for content in contents:
            rssdb.set(cf.md5sum(content), 1, ex=self._WEEK_SECONDS)
        return contents
class TimeSleeper(Component):
    """Pass-through stage that pauses the polling loop for ten seconds."""

    def process(self, contents: List[str]) -> List[str]:
        time.sleep(10)
        return contents
def api():
    """Run one polling pass over the hezu2 channel feed."""
    feed_url = 'https://cf.ddot.cc/rss/telegram/channel/hezu2'
    (Pipeline()
     .add(GetContent())
     .add(FilterEmpty())
     .add(FilterHighQuality())
     .add(FilterPosted())
     .add(PostToTelegram())
     .add(CacheSuccessPosts())
     .process(feed_url))


if __name__ == "__main__":
    while True:
        api()
import string
from dataclasses import dataclass, field
from typing import Iterable, List
import docutils.nodes as nodes
from docutils.frontend import OptionParser
from docutils.parsers.rst import Parser
from docutils.utils import column_width, new_document
from pygls.lsp.methods import (
COMPLETION,
DOCUMENT_SYMBOL,
TEXT_DOCUMENT_DID_CHANGE,
TEXT_DOCUMENT_DID_OPEN,
)
from pygls.lsp.types import (
CompletionItem,
CompletionList,
CompletionParams,
DidChangeTextDocumentParams,
DidOpenTextDocumentParams,
DocumentSymbol,
DocumentSymbolParams,
Position,
Range,
SymbolKind,
)
from pygls.server import LanguageServer
@dataclass
class _Section:
name: str
start: int
end: int
subsections: List["_Section"] = field(default_factory=list)
def end_at(self, line: int):
self.end = line
if self.subsections:
self.subsections[-1].end_at(line)
def create_server(client_insert_text_interpretation: bool = True) -> LanguageServer:
    """Create and configure the reStructuredText language server.

    :param client_insert_text_interpretation: if True, heading-adornment
        completions carry the full adornment line as insert text; if False,
        only the characters still missing are inserted.
    :return: a pygls ``LanguageServer`` with completion and document-symbol
        features registered.
    """
    rst_language_server = LanguageServer()
    # In-memory index shared by the handlers below; rebuilt on open/change
    # (footnotes) and on each symbols request (sections).
    index = {
        "footnotes": [],
        "sections": {},
    }

    class FootnoteVisitor(nodes.SparseNodeVisitor):
        """Collects every footnote node into ``index["footnotes"]``."""

        def visit_footnote(self, node: nodes.footnote) -> None:
            index["footnotes"].append(node)

        def unknown_visit(self, node: nodes.Node) -> None:
            pass

    @rst_language_server.feature(TEXT_DOCUMENT_DID_OPEN)
    def did_open(ls: LanguageServer, params: DidOpenTextDocumentParams):
        # Index the footnotes of the freshly opened document.
        file_content = params.text_document.text
        rst = parse_rst(file_content)
        rst.walk(FootnoteVisitor(rst))

    @rst_language_server.feature(TEXT_DOCUMENT_DID_CHANGE)
    def did_change(ls: LanguageServer, params: DidChangeTextDocumentParams):
        doc_id = params.text_document.uri
        new_doc = ls.workspace.get_document(doc_id)
        # Rebuild footnotes index
        index["footnotes"].clear()
        rst = parse_rst(new_doc.source)
        rst.walk(FootnoteVisitor(rst))

    @rst_language_server.feature(COMPLETION)
    def completion(params: CompletionParams):
        # Combine all completion providers into a single response.
        completion_items = []
        completion_items += list(_complete_footnote_references(params))
        completion_items += list(_complete_headings(params))
        return CompletionList(
            is_incomplete=False,
            items=completion_items,
        )

    def _complete_footnote_references(
        params: CompletionParams,
    ) -> Iterable[CompletionItem]:
        """Offer one completion per indexed footnote."""
        completions = []
        for fn in index["footnotes"]:
            label = fn["names"][0]
            if "auto" in fn:
                # Auto-numbered footnotes are referenced with a leading '#'.
                label = "#" + label
            paragraphs = [
                child for child in fn.children if child.tagname == "paragraph"
            ]
            # Show the footnote's first paragraph as the completion detail.
            completion_detail = paragraphs[0].astext() if paragraphs else None
            completion = CompletionItem(
                label=label, insert_text=f"{label}]_", detail=completion_detail
            )
            completions.append(completion)
        return completions

    def _complete_headings(params: CompletionParams) -> Iterable[CompletionItem]:
        """Offer to extend a partially typed heading adornment line.

        Triggers only when the cursor line consists of a single repeated
        punctuation character and the previous (title) line is wider than
        what has been typed so far.
        """
        current_line_index = params.position.line
        previous_line_index = current_line_index - 1
        current_line_length = params.position.character
        if current_line_length == 0 or previous_line_index < 0:
            return ()
        document_uri = params.text_document.uri
        doc = rst_language_server.workspace.get_document(document_uri)
        document_content: str = doc.source
        if not document_content:
            return ()
        lines = document_content.splitlines()
        adornment_char = lines[current_line_index][-1]
        if not adornment_char or adornment_char not in string.punctuation:
            return ()
        consists_of_one_char = (
            lines[current_line_index] == len(lines[current_line_index]) * adornment_char
        )
        if not consists_of_one_char:
            return ()
        # column_width accounts for wide (e.g. CJK) characters in the title.
        title_width = column_width(lines[previous_line_index])
        if current_line_length >= title_width:
            return ()
        if client_insert_text_interpretation:
            insert_text = title_width * adornment_char
        else:
            insert_text = (title_width - current_line_length) * adornment_char
        return (
            CompletionItem(
                label=3 * adornment_char,
                insert_text=insert_text,
            ),
        )

    @rst_language_server.feature(DOCUMENT_SYMBOL)
    def symbols(ls: LanguageServer, params: DocumentSymbolParams):
        # Build a fresh section tree for this document, then convert it to
        # LSP DocumentSymbols via _to_symbol.
        doc_id = params.text_document.uri
        index["sections"][doc_id] = []
        document = ls.workspace.get_document(doc_id)
        rst = parse_rst(document.source)

        class SymbolVisitor(nodes.SparseNodeVisitor):
            """Builds the nested _Section tree for the current document."""

            def __init__(self, doc: nodes.document):
                super().__init__(doc)
                # Stack of currently open sections (outermost first).
                self.section_stack = []

            def visit_section(self, node: nodes.section) -> None:
                section_title = node[0]
                # NOTE(review): assumes node.line points two lines past the
                # visible title line (title + adornment) — confirm against
                # docutils line-numbering.
                section_start = node.line - 2
                top_level_sections = index["sections"][doc_id]
                s = _Section(
                    name=section_title.astext(),
                    start=section_start,
                    end=-1,
                )
                if top_level_sections:
                    # End all sections at the same or deeper level
                    top_level_sections[-1].end_at(section_start - 1)
                # If there is a section at a higher level, add this section as a subsection
                if self.section_stack:
                    self.section_stack[-1].subsections.append(s)
                else:
                    # Add a new top-level section
                    index["sections"][doc_id].append(s)
                self.section_stack.append(s)

            def depart_section(self, node: nodes.section) -> None:
                self.section_stack.pop()

            def unknown_visit(self, node: nodes.Node) -> None:
                pass

        rst.walkabout(SymbolVisitor(rst))
        if index["sections"][doc_id]:
            # The last open section runs to the end of the document.
            index["sections"][doc_id][-1].end_at(len(document.lines) - 1)
        symbols = []
        for s in index["sections"][doc_id]:
            symbol = _to_symbol(document.lines, s)
            symbols.append(symbol)
        return symbols

    return rst_language_server
def _to_symbol(lines: List[str], s: _Section) -> DocumentSymbol:
    """Convert a ``_Section`` (and its subsections, recursively) to an LSP
    ``DocumentSymbol``.

    :param lines: the document's lines, used to size the symbol range
    :param s: the section to convert
    """
    name, start, end = s.name, s.start, s.end
    last_line_length = len(lines[end])
    # Clamp at 0: an empty last line previously produced character=-1,
    # which is invalid in LSP (character offsets are zero-based and
    # non-negative).
    section_range = Range(
        start=Position(line=start, character=0),
        end=Position(line=end, character=max(0, last_line_length - 1)),
    )
    symbol = DocumentSymbol(
        name=name,
        kind=SymbolKind.Class,
        range=section_range,
        selection_range=section_range,
        children=[_to_symbol(lines, subsection) for subsection in s.subsections],
    )
    return symbol
def parse_rst(text: str) -> nodes.document:
    """Parse *text* as reStructuredText and return the document tree.

    ``report_level=3`` limits reported parser issues to errors and worse.
    """
    default_settings = dict(report_level=3)  # Report errors and worse
    settings = OptionParser(
        components=(Parser,), defaults=default_settings
    ).get_default_values()
    document = new_document("rst document", settings=settings)
    Parser().parse(text, document)
    return document
import logging
from textwrap import indent
from typing import IO, Any, Dict, Iterable, List, NamedTuple, Optional, Set
from markdown_it.token import Token
from mdformat.plugins import PARSER_EXTENSIONS
from mdformat.renderer import LOGGER, MDRenderer, RenderContext, RenderTreeNode
from mdformat.renderer._util import longest_consecutive_sequence
from .markdownit import MarkdownItRenderer, RenderOutput
from .parser import to_docutils_ast
from .utils import yaml_dump
def _unprocessed_render(node: RenderTreeNode, context: RenderContext) -> str:
    """Return the node content verbatim (no escaping or processing)."""
    raw_text = node.content
    return raw_text
def _front_matter_tokens_render(node: RenderTreeNode, context: RenderContext) -> str:
    """Render front matter whose values must themselves be rendered.

    Rebuilds a nested mapping from each child's ``key_path`` and dumps it
    back as a YAML block between ``---`` fences.
    """
    data = {}
    for entry in node.children:
        key_path = entry.meta["key_path"]
        if entry.children:
            rendered = "\n\n".join(part.render(context) for part in entry.children)
        else:
            # A key without content becomes a bare True flag.
            rendered = True
        target = data
        for segment in key_path[:-1]:
            target = target.setdefault(segment, {})
        target[key_path[-1]] = rendered
    yaml_text = yaml_dump(data).rstrip()
    return f"---\n{yaml_text}\n---"
def _sub_renderer(node: RenderTreeNode, context: RenderContext) -> str:
    """Render a substitution as ``{{ content }}``."""
    return "{{ " + node.content + " }}"
def _directive_render(node: RenderTreeNode, context: RenderContext) -> str:
    """Directive render, for handling directives that may contain child elements.

    Produces a MyST fenced directive: ``{name}`` plus argument on the
    opening fence, an optional ':'-prefixed YAML option block, then the
    rendered content, closed by a matching fence.
    """
    # special directives that should only be used within substitutions
    if node.meta["module"].endswith("misc.Replace") and node.children:
        return "\n\n".join(child.render(context) for child in node.children[-1])
    if node.meta["module"].endswith("misc.Date"):
        return "{sub-ref}`today`"
    # TODO handle unicode directive
    name = node.meta["name"]
    info_str = option_block = code_block = ""
    if node.children and node.children[0].type == "directive_arg":
        # The directive argument becomes the fence info string; it must be a
        # single line, so embedded newlines are collapsed to spaces.
        info_str = "".join(child.render(context) for child in node.children[0])
        info_str = " ".join(info_str.splitlines()).strip()
        if info_str:
            info_str = " " + info_str
    if node.meta["options_list"]:
        # Options render as YAML, each line prefixed with ':'; numeric
        # values are coerced to int, value-less options become True.
        yaml_str = yaml_dump(
            {
                key: (True if val is None else (int(val) if val.isnumeric() else val))
                for key, val in node.meta["options_list"]
            }
        )
        option_block = indent(yaml_str, ":", lambda s: True).strip()
    if node.children and node.children[-1].type == "directive_content":
        content = "\n\n".join(child.render(context) for child in node.children[-1])
        if not option_block and content.startswith(":"):
            # add a new-line, so content is not treated as an option
            content = "\n" + content
        elif option_block and content:
            # new lines between options and content
            option_block += "\n\n"
        code_block = content
    if option_block or code_block:
        # new line before closing fence
        code_block += "\n"
    # Info strings of backtick code fences can not contain backticks or tildes.
    # If that is the case, we make a tilde code fence instead.
    if node.markup and ":" in node.markup:
        fence_char = ":"
    elif "`" in info_str or "~" in info_str:
        fence_char = "~"
    else:
        fence_char = "`"
    # The code block must not include as long or longer sequence of `fence_char`s
    # as the fence string itself
    fence_len = max(3, longest_consecutive_sequence(code_block, fence_char) + 1)
    fence_str = fence_char * fence_len
    return f"{fence_str}{{{name}}}{info_str}\n{option_block}{code_block}{fence_str}"
class AdditionalRenderers:
    """Extension object exposing this module's custom node renderers.

    Passed alongside the mdformat parser extensions in ``from_tokens``;
    presumably picked up through the ``RENDERERS`` plugin attribute —
    see mdformat's plugin protocol.
    """

    # Maps syntax-tree node types to their render functions.
    RENDERERS = {
        "unprocessed": _unprocessed_render,
        "front_matter_tokens": _front_matter_tokens_render,
        "substitution_block": _sub_renderer,
        "substitution_inline": _sub_renderer,
        "directive": _directive_render,
    }
def from_tokens(
    output: RenderOutput,
    *,
    consecutive_numbering: bool = True,
    warning_stream: Optional[IO] = None,
) -> str:
    """Convert markdown-it tokens to text.

    :param output: the token stream and environment to render
    :param consecutive_numbering: apply consecutive numbering to ordered lists
    :param warning_stream: optional stream that captures mdformat warnings
        emitted while rendering
    """
    md_renderer = MDRenderer()
    # TODO option for consecutive numbering consecutive_numbering, etc
    options = {
        "parser_extension": [
            PARSER_EXTENSIONS[name]
            for name in ["myst", "tables", "frontmatter", "deflist"]
        ]
        + [AdditionalRenderers],
        "mdformat": {"number": consecutive_numbering},
    }
    # temporarily redirect mdformat logging
    warning_handler = None
    if warning_stream:
        warning_handler = logging.StreamHandler(warning_stream)
        warning_handler.setLevel(logging.WARNING)
        LOGGER.addHandler(warning_handler)
    try:
        # mdformat outputs only used reference definitions during 'finalize'
        # instead we want to output all parsed reference definitions
        text = md_renderer.render(output.tokens, options, output.env, finalize=False)
        if output.env["references"]:
            if text:
                text += "\n\n"
            # Mark every parsed reference as "used" so all are written out.
            output.env["used_refs"] = set(output.env["references"])
            text += md_renderer._write_references(output.env)
    finally:
        # Always detach the temporary handler, even if rendering raised.
        if warning_handler:
            LOGGER.removeHandler(warning_handler)
    if text:
        text += "\n"
    return text
def get_myst_extensions(tokens: List[Token]) -> Set[str]:
    """Return the MyST extensions required to parse a token sequence."""
    extensions = set()
    substitution_types = {"substitution_inline", "substitution_block"}
    math_types = {"math_inline", "math_block", "math_block_eqno"}
    for token in tokens:
        if token.type in substitution_types:
            extensions.add("substitution")
        elif token.type == "front_matter_key_open":
            # Front matter declaring substitutions also needs the extension.
            key_path = token.meta.get("key_path")
            if key_path and key_path[0] == "substitutions":
                extensions.add("substitution")
        elif token.type == "directive_open" and ":" in token.markup:
            extensions.add("colon_fence")
        elif token.type in math_types:
            extensions.add("dollarmath")
        elif token.type == "dl_open":
            extensions.add("deflist")
    return extensions
class ConvertedOutput(NamedTuple):
    """Output from ``rst_to_myst``."""

    # Final MyST Markdown text.
    text: str
    # markdown-it token stream the text was rendered from.
    tokens: List[Token]
    # Renderer environment (references, used_refs, ...).
    env: Dict[str, Any]
    # Stream containing any warnings emitted during conversion.
    warning_stream: IO
    # MyST extension names required to re-parse the output text.
    extensions: Set[str]
def rst_to_myst(
    text: str,
    *,
    warning_stream: Optional[IO] = None,
    language_code="en",
    use_sphinx: bool = True,
    extensions: Iterable[str] = (),
    conversions: Optional[Dict[str, str]] = None,
    default_domain: str = "py",
    default_role: Optional[str] = None,
    raise_on_warning: bool = False,
    cite_prefix: str = "cite_",
    consecutive_numbering: bool = True,
    colon_fences: bool = True,
    dollar_math: bool = True,
) -> ConvertedOutput:
    """Convert RST text to MyST Markdown text.

    :param text: The input RST text
    :param warning_stream: The warning IO to write to
    :param language_code: the language module to use,
        for directive/role name translation
    :param use_sphinx: Whether to load sphinx roles, directives and extentions
    :param extensions: Sphinx extension to load
    :param conversions: Overrides for mapping of how to convert directives;
        directive module path -> conversion type
    :param default_domain: name of the default sphinx domain
    :param default_role: name of the default role, otherwise convert to a literal
    :param cite_prefix: Prefix to add to citation references
    :param raise_on_warning: Raise exception on parsing warning
    :param consecutive_numbering: Apply consecutive numbering to ordered lists
    :param colon_fences: Use colon fences for directives with parsed content
    :param dollar_math: Convert math (where possible) to dollar-delimited math
    """
    # Stage 1: reST source -> docutils AST.
    document, warning_stream = to_docutils_ast(
        text,
        warning_stream=warning_stream,
        language_code=language_code,
        use_sphinx=use_sphinx,
        extensions=extensions,
        default_domain=default_domain,
        conversions=conversions,
    )
    # Stage 2: docutils AST -> markdown-it tokens.
    token_renderer = MarkdownItRenderer(
        document,
        warning_stream=warning_stream,
        cite_prefix=cite_prefix,
        raise_on_warning=raise_on_warning,
        default_role=default_role,
        colon_fences=colon_fences,
        dollar_math=dollar_math,
    )
    token_output = token_renderer.to_tokens()
    required_extensions = get_myst_extensions(token_output.tokens)
    # Stage 3: tokens -> MyST Markdown text.
    rendered_text = from_tokens(
        token_output,
        consecutive_numbering=consecutive_numbering,
        warning_stream=warning_stream,
    )
    return ConvertedOutput(
        rendered_text,
        token_output.tokens,
        token_output.env,
        warning_stream,
        required_extensions,
    )
import re
import os
import operator
import subprocess
import io
import functools
try:
import importlib.metadata as importlib_metadata # type: ignore
except ImportError:
import importlib_metadata # type: ignore
import dateutil.parser
class Repl:
    """A single replacement rule built from a definition mapping."""

    @classmethod
    def from_defn(cls, defn):
        "Return the first Repl subclass that works with this"
        candidates = (subclass(defn) for subclass in cls.__subclasses__())
        return next(filter(None, candidates))

    def __init__(self, defn):
        # Expose every key of the definition as an instance attribute.
        vars(self).update(defn)

    def matches(self, text):
        # Anchor at the end so the pattern must consume the whole text.
        return re.match(self.pattern + '$', text)

    def __bool__(self):
        # Base instances are falsy; subclasses opt in when their required
        # definition keys are present.
        return False
class URLLinker(Repl):
    r"""
    Each replacement should have the form::

        {
        pattern: "Issue #?(?P<number>\d+)",
        url: "{bitbucket}/jaraco/rst.linker/issues/{number}",
        bitbucket: https://bitbucket.org
        }

    Currently, each named group must be unique across all Repl objects used
    in a replacement.
    """

    def replace(self, match, replacer_vars):
        """Render the matched text as an reST hyperlink to the filled URL."""
        matched_text = match.group(0)
        # Later sources win: groupdict < instance vars < replacer vars.
        namespace = {**match.groupdict(), **vars(self), **replacer_vars}
        href = self.url.format(**namespace)
        return '`{text} <{href}>`_'.format(text=matched_text, href=href)

    def __bool__(self):
        # Selected by Repl.from_defn only when a 'url' was defined.
        return 'url' in vars(self)
class SCMTimestamp(Repl):
    r"""
    Replace content with a version number to include the date stamp
    from the SCM.

    For example, consider a changelog with the following::

        1.0
        ---

        Changed something.

    The following replacement definition would add a datestamp
    after the heading::

        {
        pattern: r"(?m:^((?P<scm_version>\d+(\.\d+){1,2})\n-+\n))",
        with_scm: "{text}\nTagged {rev[timestamp]}\n",
        }

    If the scm_version is detected, a timestamp will be added to the
    namespace.

    If detected, the rev[timestamp] is a datetime-aware timestamp,
    so arbitrary formatting operators may be applied to it, such as
    the following which will render as "Dec 2000"::

        {
        with_scm: "{rev[timestamp]:%b %Y}",
        }
    """

    def replace(self, match, replacer_vars):
        # Leave the matched text unchanged when no SCM timestamp is found.
        text = match.group(0)
        scm_version = match.group('scm_version')
        rev = self._get_scm_info_for(scm_version)
        if not rev:
            return text
        # Later sources win: groupdict < instance vars < replacer vars.
        ns = match.groupdict()
        ns.update(vars(self))
        ns.update(replacer_vars)
        return self.with_scm.format(text=text, rev=rev, **ns)

    @staticmethod
    def _get_scm_info_for(scm_version):
        """Return ``dict(timestamp=...)`` for the tagged revision, or None."""
        # Choose the SCM by the presence of a .hg directory; default to git.
        scm = 'hg' if os.path.isdir('.hg') else 'git'
        commands = dict(
            hg=[
                'hg',
                'log',
                '-l',
                '1',
                '--template',
                '{date|isodate}',
                '-r',
                scm_version,
            ],
            git=['git', 'log', '-1', '--format=%ai', scm_version],
        )
        cmd = commands[scm]
        try:
            # Discard SCM stderr (e.g. "unknown revision") via /dev/null.
            with open(os.devnull, 'w', encoding='utf-8') as devnull:
                out = subprocess.check_output(
                    cmd, stderr=devnull, text=True, encoding='utf-8'
                )
            ts = out.strip()
            return dict(timestamp=dateutil.parser.parse(ts))
        except Exception:
            # Best-effort: a missing SCM or unknown tag yields None.
            pass

    def __bool__(self):
        # Selected by Repl.from_defn only when 'with_scm' was defined.
        return 'with_scm' in vars(self)
class Replacer(list):
    """An ordered collection of Repl rules applied to text in one pass."""

    @staticmethod
    def load(filename):
        """Execute *filename* as Python and return its namespace dict.

        Note: this executes arbitrary code from the definition file
        (by design — it is the extension's configuration format).
        """
        defn = dict()
        # Explicit encoding for consistency with write_links; previously the
        # locale-dependent default encoding was used here.
        with open(filename, encoding='utf-8') as stream:
            exec(stream.read(), defn)
        return defn

    @classmethod
    def from_definition(cls, defn, names={}):
        """
        A definition may contain the following members:

        - using: a dictionary of variables available for substitution
        - replace: a list of replacement definitions.
        """
        repls = map(Repl.from_defn, defn.get('replace', []))
        self = cls(repls)
        vars(self).update(names)
        vars(self).update(defn.get('using', {}))
        return self

    def run(self, source):
        """Apply all replacements to *source* in a single regex pass."""
        by_pattern = operator.attrgetter('pattern')
        pattern = '|'.join(map(by_pattern, self))
        return re.sub(pattern, self.replace, source)

    def replace(self, match):
        """Dispatch *match* to the Repl whose pattern produced it."""
        text = match.group(0)
        # determine which replacement matched
        repl = next(repl for repl in self if repl.matches(text))
        return repl.replace(match, vars(self))

    def write_links(self, source, target):
        """Run the replacements over *source* and write the result to *target*."""
        with open(source, encoding='utf-8') as source:
            out = self.run(source.read())
        with open(target, 'w', encoding='utf-8') as dest:
            dest.write(out)
def setup(app):
    """Sphinx extension entry point: register config value and build hook."""
    # 'link_files' maps filename -> replacement definition (see Replacer).
    app.add_config_value(str('link_files'), {}, '')
    app.connect(str('builder-inited'), make_links)
    return dict(
        version=importlib_metadata.version('rst.linker'), parallel_read_safe=True
    )
def _extend_name(filename):
base, ext = os.path.splitext(filename)
return base + ' (links)' + ext
def _locater(app):
"""
Return a function suitable for locating the path
relative to the config container.
"""
return functools.partial(os.path.join, app.confdir)
def config_dict(config):
    """
    Given a Sphinx config object, return a dictionary of config
    values.
    """
    return {name: getattr(config, name) for name in config.values}
def make_links(app):
    """Sphinx ``builder-inited`` hook: write linked copies of configured files.

    For each (filename -> definition) entry in the ``link_files`` config
    value, run the replacements and write the result beside the source with
    a " (links)" suffix, registering cleanup for when the build finishes.
    """
    files_def = app.config.link_files
    _locate = _locater(app)
    for filename, defn in files_def.items():
        source = _locate(filename)
        replacer = Replacer.from_definition(defn, config_dict(app.config))
        target = _extend_name(source)
        replacer.write_links(source, target)
        # Delete the generated copy once the build completes.
        remover = functools.partial(_remove, target=target)
        app.connect(str('build-finished'), remover)
def _remove(app, exception, target):
os.remove(target) | /rst.linker-2.4.0-py3-none-any.whl/rst/linker.py | 0.653901 | 0.210705 | linker.py | pypi |
import re
import sys
import argparse
import math
from io import StringIO
import docutils.frontend
import docutils.nodes
import docutils.parsers.rst
import docutils.transforms.references
import docutils.utils
import docutils.utils.roman
import docutils.writers
# XXX Hack: monkeypatch docutils to support gemini:// URIs
import docutils.utils.urischemes
if "gemini" not in docutils.utils.urischemes.schemes:
docutils.utils.urischemes.schemes["gemini"] = ""
# XXX
def remove_newlines(text):
    r"""Replace end-of-line sequences in *text* with single spaces.

    Supported end of line formats:

    * LF (``\n``): Unix style end of lines
    * CR LF (``\r\n``): Windows style end of lines
    * CR (``\r``): Legacy macOS end of lines (macOS 9 and earlier)

    :param str text: The text to cleanup.
    :rtype: str
    :return: The cleaned text.
    """
    # CR LF must be replaced first so it maps to one space, not two.
    for eol in ("\r\n", "\n", "\r"):
        text = text.replace(eol, " ")
    return text
def flatten_node_tree(nodes):
    """Recursively expand every ``NodeGroup`` into a flat list of nodes.

    :param list<Node> nodes: A list of ``Node``.
    :rtype: list<Node>
    """
    flat = []
    for item in nodes:
        if isinstance(item, NodeGroup):
            flat.extend(flatten_node_tree(item.nodes))
        else:
            flat.append(item)
    return flat
def search_lines_recursive(rst_node):
    """Collect the line numbers recorded on *rst_node* and its descendants.

    Nodes with a falsy ``line`` attribute are skipped.

    :param rst_node: any rst node from docutils.
    :rtype: list<int>
    """
    found = []
    if rst_node.line:
        found.append(rst_node.line)
    for child in rst_node.children or []:
        found.extend(search_lines_recursive(child))
    return found
def parse_rst(rst_text, source_path="document"):
    """Parse a reStructuredText document and return the docutils tree.

    :param str rst_text: The reStructuredText to parse.
    :param str source_path: The path of the source reStructuredText file
                            (optional, but required if the document contains an
                            ``include`` directive)
    :rtype: docutils.nodes.document
    """
    settings = docutils.frontend.get_default_settings(docutils.parsers.rst.Parser)
    document = docutils.utils.new_document(source_path, settings=settings)
    # Keep the raw source on the document: the Gemtext translator requires
    # it (used for tables).
    document._original_rst = rst_text
    docutils.parsers.rst.Parser().parse(rst_text, document)
    return document
class Node:
    """Base class to implement Gemini text (gemtext) nodes."""

    def __init__(self, rst_node):
        #: The original reStructuredText node
        self.rst_node = rst_node
        #: Contains raw text extracted from reStructuredText nodes.
        self.rawtext = ""

    def append_text(self, text):
        """Appends some raw text to the current node.

        :param str text: The text to append.
        """
        self.rawtext += text

    def to_gemtext(self, options=None):
        """Generates the Gemtext markup from the current node.

        :param options: optional rendering options (unused here; the former
            mutable ``{}`` default has been replaced with None).
        :raises NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError()
class NodeGroup(Node):
    """Base class for gemtext nodes that aggregate child nodes."""

    def __init__(self, rst_node):
        Node.__init__(self, rst_node)
        #: Child nodes belonging to this group.
        self.nodes = []

    def to_gemtext(self):
        rendered_children = [child.to_gemtext() for child in self.nodes]
        return "\n".join(rendered_children)
class ParagraphNode(Node):
    """A paragraph, rendered as a single gemtext line."""

    def to_gemtext(self):
        # Gemtext paragraphs must stay on one line; collapse line breaks.
        return remove_newlines(self.rawtext)
class TitleNode(Node):
    """A section title, rendered as a gemtext heading."""

    def __init__(self, rst_node, level=1):
        Node.__init__(self, rst_node)
        #: Section nesting depth of the title.
        self.level = level

    def to_gemtext(self):
        # Gemtext only supports heading levels 1 through 3; clamp.
        depth = min(3, max(1, self.level))
        return f"{'#' * depth} {self.rawtext}"
class PreformattedTextNode(Node):
    """A preformatted block, rendered inside triple-backtick fences."""

    def __init__(self, rst_node, alt=""):
        Node.__init__(self, rst_node)
        #: Alt text placed after the opening fence (e.g. a language name).
        self.alt = alt

    def to_gemtext(self):
        return f"```{self.alt}\n{self.rawtext}\n```"
class BlockQuoteNode(NodeGroup):
    """A block quote; every child line is prefixed with "> "."""

    def to_gemtext(self):
        quoted_children = ["> %s" % child.to_gemtext() for child in self.nodes]
        # Separate children with an empty quote line.
        return "\n>\n".join(quoted_children)
class BulletListNode(NodeGroup):
    """An unordered list; item children get a "* " prefix."""

    def to_gemtext(self):
        lines = []
        for child in self.nodes:
            if type(child) is ListItemNode:
                lines.append("* %s" % child.to_gemtext())
            else:
                # Non-item children (e.g. nested structures) render as-is.
                lines.append(child.to_gemtext())
        return "\n".join(lines)
class ListItemNode(Node):
    """A single list item, rendered on one line."""

    def to_gemtext(self):
        # Items must stay on a single gemtext line.
        return remove_newlines(self.rawtext)
class EnumaratedListNode(BulletListNode):
    """An ordered list; each item carries a counter rendered per ``enumtype``."""

    def __init__(self, node, enumtype="arabic", prefix="", suffix=".", start=1):
        BulletListNode.__init__(self, node)
        #: Counter style: arabic, loweralpha, upperalpha, lowerroman, upperroman.
        self.enumtype = enumtype
        #: Text placed before the counter.
        self.prefix = prefix
        #: Text placed after the counter.
        self.suffix = suffix
        #: First counter value.
        self.start = start

    def _to_arabic(self, number):
        return str(number)

    def _to_loweralpha(self, number):
        glyphs = "abcdefghijklmnopqrstuvwxyz"
        letters = []
        # Bijective base-26 conversion (1 -> a, 26 -> z, 27 -> aa).
        while number:
            number -= 1
            letters.append(glyphs[number % len(glyphs)])
            number //= len(glyphs)
        return "".join(reversed(letters))

    def _to_upperalpha(self, number):
        return self._to_loweralpha(number).upper()

    def _to_lowerroman(self, number):
        return docutils.utils.roman.toRoman(number).lower()

    def _to_upperroman(self, number):
        return docutils.utils.roman.toRoman(number)

    def to_gemtext(self):
        convert = getattr(self, "_to_%s" % self.enumtype)
        counter = self.start
        lines = []
        for child in self.nodes:
            if type(child) is ListItemNode:
                lines.append(
                    "* %s%s%s %s"
                    % (self.prefix, convert(counter), self.suffix, child.to_gemtext())
                )
            else:
                lines.append(child.to_gemtext())
            # NOTE(review): the counter advances for every child, including
            # non-item nodes — confirm this is the intended numbering.
            counter += 1
        return "\n".join(lines)
class SystemMessageNode(NodeGroup):
    """A docutils system message (parser diagnostics) with its location."""

    def __init__(self, rst_node, level=1, source="document", line=0, type_="info"):
        NodeGroup.__init__(self, rst_node)
        #: Severity level reported by docutils.
        self.level = level
        #: Source document name or path.
        self.source = source
        #: Line number the message refers to.
        self.line = line
        #: Message category (e.g. "info", "warning").
        self.type_ = type_

    def __str__(self):
        location = (self.source, self.line, self.type_, self.to_gemtext())
        return "%s:%i: %s: %s" % location
class LinkNode(Node):
    """A gemtext link line ("=> URI [label]")."""

    def __init__(self, rst_node, refname=None, uri=None, text=None):
        Node.__init__(self, rst_node)
        #: Reference name, for links whose URI is resolved later.
        self.refname = refname
        #: Target URI (may still be None until resolved).
        self.uri = uri
        # The visible label falls back to the URI when no text is given.
        self.rawtext = text if text else uri

    def to_gemtext(self):
        if not self.uri:
            raise ValueError("Link URI not resolved!")
        if self.rawtext == self.uri:
            return "=> %s" % self.uri
        return "=> %s %s" % (self.uri, self.rawtext)
class LinkGroupNode(NodeGroup):
    """A group of consecutive gemtext link lines kept together."""

    pass
class SeparatorNode(Node):
    """A horizontal rule, rendered as an 80-dash line."""

    def to_gemtext(self):
        rule_width = 80
        return "-" * rule_width
class RawNode(Node):
    """Raw content emitted verbatim into the gemtext output."""

    def __init__(self, rst_node, format_):
        Node.__init__(self, rst_node)
        #: Format name attached to the raw content.
        self.format = format_

    def to_gemtext(self):
        # Raw content is passed through untouched.
        return self.rawtext
class FigureNode(NodeGroup):
    """A figure: its image link plus optional caption, grouped together."""

    pass
class AdmonitionNode(NodeGroup):
    """An admonition (note, warning, ...) rendered between dashed rules."""

    def __init__(self, rst_node, type_=None, title=None):
        NodeGroup.__init__(self, rst_node)
        #: Admonition kind (e.g. "note", "warning"); None for generic ones.
        self.type = type_
        #: Explicit title; takes precedence over the type-derived one.
        self.title = title

    def gen_title(self):
        """Return the title to display, deriving one from the type if unset."""
        if self.title:
            return self.title
        default_titles = {
            "note": "📝️ Note:",
            "hint": "💡️ Hint",
            "tip": "💡️ Tip",
            "important": "‼️ Important",
            "attention": "⚠️ Attention",
            "warning": "⚠️ Warning",
            "caution": "⚠️ Caution",
            "danger": "⚠️ Danger",
            "error": "⛔️ Error",
        }
        return default_titles.get(self.type, "")

    def to_gemtext(self):
        rule = "-" * 80
        body = NodeGroup.to_gemtext(self)
        return "\n".join([rule, self.gen_title(), rule, body, rule])
class GemtextTranslator(docutils.nodes.GenericNodeVisitor):
"""Translate reStructuredText text nodes to Gemini text nodes."""
#: Nodes to ignore as there is no equivalent markup in Gemtext.
#: NOTE: the text inside the notes will be added to the parent node.
_NOP_NODES = [
"emphasis",
"literal",
"strong",
"target",
]
#: Nodes that should be completely ignored with their content
_SKIPPED_NODES = [
"field_list", # TODO Handle fields as metadata
"comment",
"substitution_definition",
]
    def __init__(self, document):
        """Prepare the translator for walking *document*.

        :param document: a docutils document created via :func:`parse_rst`.
        :raises ValueError: if the document lacks the ``_original_rst``
            attribute that :func:`parse_rst` attaches.
        """
        docutils.nodes.GenericNodeVisitor.__init__(self, document)
        #: List of Gemtext nodes that compose the final document.
        self.nodes = []
        #: List of messages generated by docutils
        self.messages = []
        #: The node that is currently being edited.
        self._current_node = None
        #: The current section level (used for the titles level)
        self._section_level = 0
        #: The node that is being skipped
        self._skipped_node = None
        # Check the document object is patched and contains the original reST
        # text. This is required for tables
        if not hasattr(document, "_original_rst"):
            raise ValueError(
                "The given document object do not contains the _original_rst attribute. "
                "Please use the rst2gemtext.parse_rst method to create the document object."
            )
    def dispatch_visit(self, rst_node):
        """Route *rst_node* to its ``visit_*`` handler, honouring skip lists.

        While inside a skipped subtree, every node is ignored until the
        skipped root departs.
        """
        if self._skipped_node:
            return
        if rst_node.tagname in self._SKIPPED_NODES:
            # Remember the subtree root so dispatch_departure can end the skip.
            self._skipped_node = rst_node
            return
        if rst_node.tagname in self._NOP_NODES:
            # No gemtext equivalent; the node's text reaches the parent node.
            return
        docutils.nodes.GenericNodeVisitor.dispatch_visit(self, rst_node)
    def dispatch_departure(self, rst_node):
        """Route *rst_node* to its ``depart_*`` handler, honouring skip lists."""
        if self._skipped_node:
            # Leaving the skipped subtree's root ends the skip.
            if self._skipped_node is rst_node:
                self._skipped_node = None
            return
        if rst_node.tagname in self._NOP_NODES:
            return
        docutils.nodes.GenericNodeVisitor.dispatch_departure(self, rst_node)
    def _split_nodes(self, rst_node):
        """Split the node list on the given rst_node.

        :param rst_node: The reStructuredText node
        :rtype: list[Node]
        :return: The nodes below the rst_node
        """
        # NOTE(review): if rst_node is absent, this splits at the last
        # element; if self.nodes is empty, `i` is unbound and a NameError is
        # raised — callers appear to guarantee the node was appended first.
        for i in range(len(self.nodes)):
            if self.nodes[i].rst_node is rst_node:
                break
        splitted = self.nodes[i:]
        self.nodes = self.nodes[:i]
        return splitted
# ==== RST NODES ====
# admonition
def visit_admonition(self, rst_node, type_=None):
admonition_node = AdmonitionNode(rst_node, type_)
self._current_node = None # To catch eventual errors
self.nodes.append(admonition_node)
def depart_admonition(self, rst_node):
nodes = self._split_nodes(rst_node)
admonition_node = nodes.pop(0)
if admonition_node.type is None:
if isinstance(nodes[0], TitleNode):
title_node = nodes.pop(0)
admonition_node.title = title_node.rawtext
admonition_node.nodes = nodes
self.nodes.append(admonition_node)
# attention (admonition)
def visit_attention(self, rst_node):
self.visit_admonition(rst_node, type_="attention")
def depart_attention(self, rst_node):
self.depart_admonition(rst_node)
# block_quote
def visit_block_quote(self, rst_node):
block_quote_node = BlockQuoteNode(rst_node)
self._current_node = None # To catch eventual errors
self.nodes.append(block_quote_node)
def depart_block_quote(self, rst_node):
nodes = self._split_nodes(rst_node)
block_quote_node = nodes.pop(0)
links = []
for node in nodes:
if type(node) is LinkNode:
links.append(node)
elif type(node) is LinkGroupNode:
links.extend(node.nodes)
else:
block_quote_node.nodes.append(node)
if block_quote_node.nodes:
self.nodes.append(block_quote_node)
if links:
if len(links) == 1:
self.nodes.append(links[0])
else:
link_group_node = LinkGroupNode(None)
link_group_node.nodes = links
self.nodes.append(link_group_node)
# bullet_list
def visit_bullet_list(self, rst_node):
bullet_list_node = BulletListNode(rst_node)
self._current_node = None # To catch eventual errors
self.nodes.append(bullet_list_node)
def depart_bullet_list(self, rst_node):
nodes = self._split_nodes(rst_node)
bullet_list_node = nodes.pop(0)
links = []
for node in nodes:
if type(node) is LinkNode:
links.append(node)
elif type(node) is LinkGroupNode:
links.extend(node.nodes)
else:
bullet_list_node.nodes.append(node)
if bullet_list_node.nodes:
self.nodes.append(bullet_list_node)
if links:
if len(links) == 1:
self.nodes.append(links[0])
else:
link_group_node = LinkGroupNode(None)
link_group_node.nodes = links
self.nodes.append(link_group_node)
# caption
def visit_caption(self, rst_node):
self.visit_paragraph(rst_node)
def depart_caption(self, rst_node):
self.depart_paragraph(rst_node)
# caution (admonition)
def visit_caution(self, rst_node):
self.visit_admonition(rst_node, type_="caution")
def depart_caution(self, rst_node):
self.depart_admonition(rst_node)
# danger (admonition)
def visit_danger(self, rst_node):
self.visit_admonition(rst_node, type_="danger")
def depart_danger(self, rst_node):
self.depart_admonition(rst_node)
# enumerated_list
def visit_enumerated_list(self, rst_node):
enumerated_list_node = EnumaratedListNode(
rst_node,
enumtype=rst_node.attributes["enumtype"],
prefix=rst_node.attributes["prefix"],
suffix=rst_node.attributes["suffix"],
start=rst_node.attributes["start"] if "start" in rst_node.attributes else 1,
)
self._current_node = None # To catch eventual errors
self.nodes.append(enumerated_list_node)
def depart_enumerated_list(self, rst_node):
self.depart_bullet_list(rst_node)
# error (admonition)
def visit_error(self, rst_node):
self.visit_admonition(rst_node, type_="error")
def depart_error(self, rst_node):
self.depart_admonition(rst_node)
# figure
def visit_figure(self, rst_node):
figure_node = FigureNode(rst_node)
self._current_node = None
self.nodes.append(figure_node)
def depart_figure(self, rst_node):
nodes = self._split_nodes(rst_node)
figure_node = nodes.pop(0)
for node in nodes:
if (
type(node) is LinkNode
and figure_node.nodes
and type(figure_node.nodes[-1]) is LinkNode
):
prev_node = figure_node.nodes.pop()
if prev_node.uri == node.uri:
if prev_node.rawtext and not node.rawtext:
figure_node.nodes.append(prev_node)
else:
figure_node.nodes.append(node)
else:
# Swap link / image
figure_node.nodes.append(node)
figure_node.nodes.append(prev_node)
elif type(node) is ParagraphNode:
caption_is_alttext = False
for fnode in figure_node.nodes:
if fnode.rawtext == node.rawtext:
caption_is_alttext = True
break
if not caption_is_alttext:
figure_node.nodes.append(node)
else:
figure_node.nodes.append(node)
if (
type(figure_node.nodes[0]) is LinkNode
and type(figure_node.nodes[-1]) is ParagraphNode
):
if figure_node.nodes[0].rawtext == figure_node.nodes[0].uri:
caption = figure_node.nodes.pop()
figure_node.nodes[0].rawtext = caption.rawtext
self.nodes.append(figure_node)
# hint (admonition)
def visit_hint(self, rst_node):
self.visit_admonition(rst_node, type_="hint")
def depart_hint(self, rst_node):
self.depart_admonition(rst_node)
# image
def visit_image(self, rst_node):
link_node = LinkNode(
rst_node,
uri=rst_node.attributes["uri"],
text=rst_node.attributes["alt"] if "alt" in rst_node.attributes else None,
)
self.nodes.append(link_node)
    def depart_image(self, rst_node):
        """Nothing to do: the link node was fully built on visit."""
        pass
    # important (admonition)
    def visit_important(self, rst_node):
        """Open an ``important`` admonition (delegates to the generic handler)."""
        self.visit_admonition(rst_node, type_="important")

    def depart_important(self, rst_node):
        """Close an ``important`` admonition."""
        self.depart_admonition(rst_node)
    # list_item
    def visit_list_item(self, rst_node):
        """Open a list item; its content is assembled on departure."""
        list_item_node = ListItemNode(rst_node)
        self._current_node = None  # To catch eventual errors
        self.nodes.append(list_item_node)
    def depart_list_item(self, rst_node):
        """Close a list item, flattening its children into Gemtext nodes.

        Nested lists and links cannot live inside a Gemtext list line, so
        they are emitted as siblings; plain content is concatenated into the
        item's text (space-separated).
        """
        nodes = self._split_nodes(rst_node)
        list_item_node = nodes.pop(0)
        for node in nodes:
            if type(node) in [BulletListNode, EnumaratedListNode]:
                # A nested list interrupts the item: flush what we have and
                # start a fresh item for any content that follows.
                self.nodes.append(list_item_node)
                self.nodes.append(node)
                list_item_node = ListItemNode(node)
            elif type(node) in [LinkNode, LinkGroupNode]:
                # Links must be standalone lines in Gemtext.
                self.nodes.append(node)
            else:
                if list_item_node.rawtext:
                    list_item_node.append_text(" ")
                list_item_node.append_text(node.to_gemtext())
        if list_item_node.rawtext:
            self.nodes.append(list_item_node)
# literal_block
def visit_literal_block(self, rst_node):
alt = ""
for class_ in rst_node.attributes["classes"]:
if class_ != "code":
alt = class_
break
preformatted_text_node = PreformattedTextNode(rst_node, alt=alt)
self._current_node = preformatted_text_node
self.nodes.append(preformatted_text_node)
    def depart_literal_block(self, rst_node):
        """Nothing to do: the text was accumulated via ``visit_Text``."""
        pass
    # note (admonition)
    def visit_note(self, rst_node):
        """Open a ``note`` admonition (delegates to the generic handler)."""
        self.visit_admonition(rst_node, type_="note")

    def depart_note(self, rst_node):
        """Close a ``note`` admonition."""
        self.depart_admonition(rst_node)
    # paragraph
    def visit_paragraph(self, rst_node):
        """Open a paragraph and make it the target for inline text."""
        paragraph_node = ParagraphNode(rst_node)
        self._current_node = paragraph_node
        self.nodes.append(paragraph_node)
    def depart_paragraph(self, rst_node):
        """Close a paragraph, extracting any links it contained.

        A paragraph that consists of a single link becomes that link alone;
        otherwise the text is kept (if non-empty) and the links are grouped
        after it, since Gemtext links must be standalone lines.
        """
        nodes = self._split_nodes(rst_node)
        paragraph_node = nodes.pop(0)
        if len(nodes) == 1 and nodes[0].rawtext == paragraph_node.rawtext:
            # The paragraph is just one link: emit the link instead.
            self.nodes.append(nodes[0])
        else:
            if paragraph_node.to_gemtext().strip():
                self.nodes.append(paragraph_node)
            if nodes:
                link_group_node = LinkGroupNode(rst_node)
                link_group_node.nodes = nodes
                self.nodes.append(link_group_node)
    # raw
    def visit_raw(self, rst_node):
        """Open a raw block, remembering its output format."""
        raw_node = RawNode(rst_node, rst_node.attributes["format"])
        self._current_node = raw_node
        self.nodes.append(raw_node)
    def depart_raw(self, rst_node):
        """Keep raw content only when it targets gemtext output."""
        if self.nodes[-1].format not in ["gemtext", "gmi"]:
            self.nodes.pop()
# reference
def visit_reference(self, rst_node):
link_node = LinkNode(
rst_node,
refname=rst_node.attributes["refname"]
if "refname" in rst_node.attributes
else None,
uri=rst_node.attributes["refuri"]
if "refuri" in rst_node.attributes
else None,
text=rst_node.attributes["name"] if "name" in rst_node.attributes else None,
)
self.nodes.append(link_node)
    def depart_reference(self, rst_node):
        """Nothing to do: the link node was fully built on visit."""
        pass
    # section
    def visit_section(self, rst_node):
        """Track section nesting; the level sets title heading depth."""
        self._section_level += 1

    def depart_section(self, rst_node):
        """Leave a section level."""
        self._section_level -= 1
    # system_message
    def visit_system_message(self, rst_node):
        """Open a docutils system message (parser warning/error)."""
        system_message_node = SystemMessageNode(
            rst_node,
            level=rst_node.attributes["level"],
            line=rst_node.attributes["line"],
            source=rst_node.attributes["source"],
            type_=rst_node.attributes["type"],
        )
        self._current_node = None  # To catch eventual errors
        self.nodes.append(system_message_node)
    def depart_system_message(self, rst_node):
        """Close a system message: collect it into ``messages``, not output."""
        nodes = self._split_nodes(rst_node)
        system_message_node = nodes.pop(0)
        system_message_node.nodes = nodes
        self.messages.append(system_message_node)
    # table
    def visit_table(self, rst_node):
        """Open a table; it will be rendered as preformatted text."""
        preformatted_text_node = PreformattedTextNode(rst_node)
        self._current_node = None  # To catch eventual errors
        self.nodes.append(preformatted_text_node)
    def depart_table(self, rst_node):
        """Close a table: copy its source lines verbatim into the node.

        Tables cannot be expressed in Gemtext, so the original reST source
        lines spanned by the table are re-emitted as a preformatted block,
        with common leading indentation stripped. A nested title becomes the
        block's alt text.
        """
        # TODO: handle links
        nodes = self._split_nodes(rst_node)
        preformatted_text_node = nodes.pop(0)
        title = ""
        line_min = math.inf
        line_max = 0
        for node in flatten_node_tree(nodes):
            if isinstance(node, TitleNode):
                title = node.rawtext
                continue
            lines = search_lines_recursive(node.rst_node)
            if lines:
                line_min = min(line_min, *lines)
                line_max = max(line_max, *lines)
        # NOTE(review): if no child reported a line number, line_min stays
        # math.inf and the slice below would fail — presumably docutils always
        # provides line info for table cells; confirm.
        line_min -= 1
        line_max += 1
        table_lines = self.document._original_rst.split("\n")[line_min - 1 : line_max]
        # Strip the indentation of the first table line from every line.
        indent = len(re.match(r"^(\s*).*$", table_lines[0]).group(1))
        preformatted_text_node.append_text(
            "\n".join([line[indent:] for line in table_lines])
        )
        if title:
            preformatted_text_node.alt = title
        self.nodes.append(preformatted_text_node)
    # Text (leaf)
    def visit_Text(self, rst_node):
        """Append leaf text to whichever node is currently open."""
        self._current_node.append_text(rst_node.astext())

    def depart_Text(self, rst_node):
        """Nothing to do for leaf text."""
        pass
    # tip (admonition)
    def visit_tip(self, rst_node):
        """Open a ``tip`` admonition (delegates to the generic handler)."""
        self.visit_admonition(rst_node, type_="tip")

    def depart_tip(self, rst_node):
        """Close a ``tip`` admonition."""
        self.depart_admonition(rst_node)
    # title
    def visit_title(self, rst_node):
        """Open a title; its heading level is the current section depth."""
        title_node = TitleNode(rst_node, level=self._section_level)
        self._current_node = title_node
        self.nodes.append(title_node)

    def depart_title(self, rst_node):
        """Nothing to do: the title text was accumulated via ``visit_Text``."""
        pass
    # transition
    def visit_transition(self, rst_node):
        """Render a transition (horizontal rule) as a separator node."""
        self.nodes.append(SeparatorNode(rst_node))

    def depart_transition(self, rst_node):
        """Nothing to do for transitions."""
        pass
    # warning (admonition)
    def visit_warning(self, rst_node):
        """Open a ``warning`` admonition (delegates to the generic handler)."""
        self.visit_admonition(rst_node, type_="warning")

    def depart_warning(self, rst_node):
        """Close a ``warning`` admonition."""
        self.depart_admonition(rst_node)
    # ==== DEFAULT ====
    # No-op fallbacks for node types without a dedicated handler.
    def default_visit(self, rst_node):
        """Override for generic, uniform traversals."""
        pass

    def default_departure(self, rst_node):
        """Override for generic, uniform traversals."""
        pass
class GemtextWriter(docutils.writers.Writer):
    """Write Gemtext from a reStructuredText document."""

    def __init__(self):
        docutils.writers.Writer.__init__(self)
        # Transforms applied to the document before translation: resolve
        # substitution references and external link targets.
        self.transforms = [
            docutils.transforms.references.Substitutions,
            docutils.transforms.references.ExternalTargets,
        ]
        self.visitor = None  # set by translate()

    def translate(self):
        """Apply the transforms, walk the tree, and build ``self.output``."""
        self.visitor = GemtextTranslator(self.document)
        for Transform in self.transforms:
            transform = Transform(self.document)
            transform.apply()
        self.document.walkabout(self.visitor)
        self._before_translate_output_generation_hook()
        # Gemtext nodes are separated by a blank line.
        self.output = (
            "\n\n".join([node.to_gemtext() for node in self.visitor.nodes]) + "\n"
        )

    def _before_translate_output_generation_hook(self):
        """Method called just before generating the final GemText document. At
        this stage, the reStructuredText document is parsed, tranformed, and
        converted into GemText nodes.

        This method can be used by subclasses to manipulate the GemText node
        before the final document is generated.

        ::

            for node in self.visitor.nodes:
                do_something(node)
        """
        pass
def convert(rst_text, source_path="document"):
    """Convert the input reStructuredText to Gemtext.

    :param str rst_text: The input reStructuredText.
    :param str source_path: The path of the source reStructuredText file
        (optional, but required if the document contains an
        ``include`` directive)

    :rtype: str
    :return: The converted Gemtext.
    """
    document = parse_rst(rst_text, source_path)
    writer = GemtextWriter()
    buffer_ = StringIO()
    writer.write(document, buffer_)
    return buffer_.getvalue()
def main(args=sys.argv[1:]):
    """Command-line entry point: convert a reST file to a Gemtext file.

    :param list args: command-line arguments (defaults to ``sys.argv[1:]``;
        note the default is evaluated once at import time).
    """
    parser = argparse.ArgumentParser(
        prog="rst2gemtext",
        description="Converts reStructuredText to Gemtext (Gemini markup format)",
        epilog="Inaccurate output? Report bugs to https://github.com/flozz/rst2gemtext/issues",
    )
    parser.add_argument(
        "input_rst",
        help="the reStructuredText file to convert",
        type=argparse.FileType("r"),
    )
    parser.add_argument(
        "output_gemtext",
        help="the output Gemtext file",
        type=argparse.FileType("w"),
    )
    parser.add_argument(
        "--print-xml",
        help="print the reStructuredText as XML DOM for debug purpose",
        action="store_true",
        default=False,
    )
    params = parser.parse_args(args)
    input_rst = params.input_rst.read()
    document = parse_rst(input_rst, source_path=params.input_rst.name)
    if params.print_xml:
        print(document.asdom().toprettyxml(indent="  "))
    writer = GemtextWriter()
    writer.write(document, params.output_gemtext)
    # Report parsing problems (docutils system messages) on stdout.
    for message in writer.visitor.messages:
        print(message)
# Run the CLI when executed as a script (extraction residue removed from
# the original line).
if __name__ == "__main__":
    main()
# Author: Florian Brucker <mail@florianbrucker.de>
# Copyright: This module has been placed in the public domain.
"""
Math handling for ``html5css3``.
"""
from __future__ import unicode_literals
import codecs
import os.path
from docutils.utils.math.unichar2tex import uni2tex_table
from docutils.utils.math import math2html, pick_math_environment
from docutils.utils.math.latex2mathml import parse_latex_math
from .html import *
# Public API of this module. Must list the names actually defined below:
# the class is spelled "LaTeXMathHandler" (the original entry
# 'LateXMathHandler' was a typo that would break `from ... import *`).
__all__ = ['HTMLMathHandler', 'LaTeXMathHandler', 'MathHandler',
           'MathJaxMathHandler', 'MathMLMathHandler', 'SimpleMathHandler']
class MathHandler(object):
    """
    Abstract math handler.

    Subclasses provide ``_create_tag`` (and optionally ``_setup`` and the
    class-level wrappers) to turn LaTeX math nodes into HTML tags.
    """
    # CSS class applied to the generated tag (None = no class attribute).
    CLASS = None
    # printf-style templates wrapping the math code; '%(env)s' is only
    # available for block math.
    BLOCK_WRAPPER = '%(code)s'
    INLINE_WRAPPER = '%(code)s'

    def __init__(self):
        self._setup_done = False  # lazy one-time _setup() guard

    def convert(self, translator, node, block):
        """Convert a docutils math ``node`` to a tag.

        ``block`` selects display math (True) vs. inline math (False).
        Runs ``_setup(translator)`` once per handler instance.
        """
        if not self._setup_done:
            self._setup(translator)
            self._setup_done = True
        code = node.astext()
        if block:
            env = pick_math_environment(code)
            wrapper = self.BLOCK_WRAPPER
        else:
            env = ''
            wrapper = self.INLINE_WRAPPER
        # Map unicode characters to their LaTeX equivalents.
        code = code.translate(uni2tex_table)
        code = wrapper % {'code': code, 'env': env}
        tag = self._create_tag(code, block)
        if self.CLASS:
            tag.attrib['class'] = self.CLASS
        return tag

    def _create_tag(self, code, block):
        """Build and return the output tag for ``code``; subclass hook."""
        raise NotImplementedError('Must be implemented in subclass.')

    def _setup(self, translator):
        """One-time hook to register scripts/styles on the translator."""
        pass
class SimpleMathHandler(MathHandler):
    """
    Base class for math handlers that wrap the code in a single tag.

    Subclasses only set the tag factories ``BLOCK_TAG`` and ``INLINE_TAG``.
    """

    BLOCK_TAG = None
    INLINE_TAG = None

    def _create_tag(self, code, block):
        tag_factory = self.BLOCK_TAG if block else self.INLINE_TAG
        return tag_factory(code)
class LaTeXMathHandler(SimpleMathHandler):
    """
    Math handler for raw LaTeX output (code shown verbatim, not rendered).
    """
    BLOCK_TAG = Pre   # display math in <pre>
    INLINE_TAG = Tt   # inline math in <tt>
    CLASS = 'math'
    BLOCK_WRAPPER = '%(code)s'
    INLINE_WRAPPER = '%(code)s'
class MathJaxMathHandler(SimpleMathHandler):
    """
    Math handler for MathJax output.

    Emits the LaTeX code in MathJax delimiters and injects the MathJax
    loader plus its configuration into the document head on first use.
    """
    BLOCK_TAG = Div
    INLINE_TAG = Span
    CLASS = 'math'
    BLOCK_WRAPPER = '\\begin{%(env)s}\n%(code)s\n\\end{%(env)s}'
    # Raw string: '\(' is not a valid Python escape sequence and raises a
    # SyntaxWarning on recent Python versions; the value is unchanged.
    INLINE_WRAPPER = r'\(%(code)s\)'

    DEFAULT_URL = 'http://cdn.mathjax.org/mathjax/latest/MathJax.js'
    DEFAULT_CONFIG = """
MathJax.Hub.Config({
    extensions: ["tex2jax.js"],
    jax: ["input/TeX", "output/HTML-CSS"],
    tex2jax: {
        inlineMath: [["\\\\(","\\\\)"]],
        displayMath: [['$$','$$'], ["\\\\[","\\\\]"]],
        processEscapes: true
    },
    "HTML-CSS": { availableFonts: ["TeX"] }
});"""

    def __init__(self, js_url=None, config_filename=None):
        """
        :param js_url: URL of the MathJax loader (defaults to the CDN URL).
        :param config_filename: optional file whose content replaces the
            default MathJax configuration.
        """
        super(MathJaxMathHandler, self).__init__()
        self.js_url = js_url or self.DEFAULT_URL
        if config_filename:
            with codecs.open(config_filename, 'r', encoding='utf8') as f:
                self.config = f.read()
        else:
            self.config = self.DEFAULT_CONFIG

    def _setup(self, translator):
        # Inject the MathJax configuration and the loader script once.
        translator.head.append(Script(self.config,
                                      type="text/x-mathjax-config"))
        translator.head.append(Script(src=self.js_url))
class MathMLMathHandler(MathHandler):
    """
    Math handler for MathML output.
    """
    BLOCK_WRAPPER = '%(code)s'
    INLINE_WRAPPER = '%(code)s'

    def _create_tag(self, code, block):
        """Parse LaTeX ``code`` into a MathML tag tree."""
        tree = parse_latex_math(code, inline=(not block))
        html = ''.join(tree.xml())
        tag = html_to_tags(html)[0]

        def strip_ns(tag):
            # Drop the per-element namespace declaration. pop() instead of
            # del so elements without an "xmlns" attribute don't raise
            # KeyError.
            tag.attrib.pop('xmlns', None)
            for child in tag:
                strip_ns(child)

        # Only descendants are stripped; the root <math> element keeps its
        # xmlns declaration.
        for child in tag:
            strip_ns(child)
        return tag
class HTMLMathHandler(MathHandler):
    """
    Math handler for HTML output (LaTeX rendered to HTML via math2html).
    """
    CLASS = 'formula'
    BLOCK_WRAPPER = '\\begin{%(env)s}\n%(code)s\n\\end{%(env)s}'
    INLINE_WRAPPER = '$%(code)s$'
    # Stylesheet shipped next to this module.
    DEFAULT_CSS = os.path.join(os.path.dirname(__file__), 'math.css')

    def __init__(self, css_filename=None):
        """
        :param css_filename: optional path overriding the bundled math.css.
        """
        super(HTMLMathHandler, self).__init__()
        self.css_filename = css_filename or self.DEFAULT_CSS

    def _create_tag(self, code, block):
        # math2html uses a module-global flag to select display vs. inline
        # rendering mode.
        math2html.DocumentParameters.displaymode = block
        html = math2html.math2html(code)
        tags = html_to_tags(html)
        if block:
            return Div(*tags)
        else:
            return Span(*tags)

    def _setup(self, translator):
        # Register the math stylesheet once per document. (Extraction
        # residue that was fused onto this line in the source is removed.)
        translator.css(os.path.relpath(self.css_filename))
The deck.core module provides all the basic functionality for creating and
moving through a deck. It does so by applying classes to indicate the state of
the deck and its slides, allowing CSS to take care of the visual representation
of each state. It also provides methods for navigating the deck and inspecting
its state, as well as basic key bindings for going to the next and previous
slides. More functionality is provided by wholly separate extension modules
that use the API provided by core.
*/
(function($, undefined) {
  // Module-level state: slide jQuery objects, current slide index, the deck
  // container, and links targeting slide fragments.
  var slides, currentIndex, $container, $fragmentLinks;

  // Event names fired by the deck on `document`.
  var events = {
    /*
    This event fires at the beginning of a slide change, before the actual
    change occurs. Its purpose is to give extension authors a way to prevent
    the slide change from occuring. This is done by calling preventDefault
    on the event object within this event. If that is done, the deck.change
    event will never be fired and the slide will not change.
    */
    beforeChange: 'deck.beforeChange',

    /*
    This event fires whenever the current slide changes, whether by way of
    next, prev, or go. The callback function is passed two parameters, from
    and to, equal to the indices of the old slide and the new slide
    respectively. If preventDefault is called on the event within this handler
    the slide change does not occur.

    $(document).bind('deck.change', function(event, from, to) {
      alert('Moving from slide ' + from + ' to ' + to);
    });
    */
    change: 'deck.change',

    /*
    This event fires at the beginning of deck initialization. This event makes
    a good hook for preprocessing extensions looking to modify the DOM before
    the deck is fully initialized. It is also possible to halt the deck.init
    event from firing while you do things in beforeInit. This can be done by
    calling lockInit on the event object passed to this event. The init can be
    released by calling releaseInit.

    $(document).bind('deck.beforeInit', function(event) {
      event.lockInit(); // halts deck.init event
      window.setTimeout(function() {
        event.releaseInit(); // deck.init will now fire 2 seconds later
      }, 2000);
    });

    The init event will be fired regardless of locks after
    options.initLockTimeout milliseconds.
    */
    beforeInitialize: 'deck.beforeInit',

    /*
    This event fires at the end of deck initialization. Extensions should
    implement any code that relies on user extensible options (key bindings,
    element selectors, classes) within a handler for this event. Native
    events associated with Deck JS should be scoped under a .deck event
    namespace, as with the example below:

    var $d = $(document);
    $.deck.defaults.keys.myExtensionKeycode = 70; // 'h'
    $d.bind('deck.init', function() {
      $d.bind('keydown.deck', function(event) {
        if (event.which === $.deck.getOptions().keys.myExtensionKeycode) {
          // Rock out
        }
      });
    });
    */
    initialize: 'deck.init'
  };

  // Effective options (defaults merged with init overrides).
  var options = {};
  var $document = $(document);
  var $window = $(window);

  // Shared handler that keeps deck key bindings from firing while the user
  // interacts with an editable element.
  var stopPropagation = function(event) {
    event.stopPropagation();
  };
var updateContainerState = function() {
var oldIndex = $container.data('onSlide');
$container.removeClass(options.classes.onPrefix + oldIndex);
$container.addClass(options.classes.onPrefix + currentIndex);
$container.data('onSlide', currentIndex);
};
var updateChildCurrent = function() {
var $oldCurrent = $('.' + options.classes.current);
var $oldParents = $oldCurrent.parentsUntil(options.selectors.container);
var $newCurrent = slides[currentIndex];
var $newParents = $newCurrent.parentsUntil(options.selectors.container);
$oldParents.removeClass(options.classes.childCurrent);
$newParents.addClass(options.classes.childCurrent);
};
var removeOldSlideStates = function() {
var $all = $();
$.each(slides, function(i, el) {
$all = $all.add(el);
});
$all.removeClass([
options.classes.before,
options.classes.previous,
options.classes.current,
options.classes.next,
options.classes.after
].join(' '));
};
var addNewSlideStates = function() {
slides[currentIndex].addClass(options.classes.current);
if (currentIndex > 0) {
slides[currentIndex-1].addClass(options.classes.previous);
}
if (currentIndex + 1 < slides.length) {
slides[currentIndex+1].addClass(options.classes.next);
}
if (currentIndex > 1) {
$.each(slides.slice(0, currentIndex - 1), function(i, $slide) {
$slide.addClass(options.classes.before);
});
}
if (currentIndex + 2 < slides.length) {
$.each(slides.slice(currentIndex+2), function(i, $slide) {
$slide.addClass(options.classes.after);
});
}
};
var setAriaHiddens = function() {
$(options.selectors.slides).each(function() {
var $slide = $(this);
var isSub = $slide.closest('.' + options.classes.childCurrent).length;
var isBefore = $slide.hasClass(options.classes.before) && !isSub;
var isPrevious = $slide.hasClass(options.classes.previous) && !isSub;
var isNext = $slide.hasClass(options.classes.next);
var isAfter = $slide.hasClass(options.classes.after);
var ariaHiddenValue = isBefore || isPrevious || isNext || isAfter;
$slide.attr('aria-hidden', ariaHiddenValue);
});
};
var updateStates = function() {
updateContainerState();
updateChildCurrent();
removeOldSlideStates();
addNewSlideStates();
if (options.setAriaHiddens) {
setAriaHiddens();
}
};
var initSlidesArray = function(elements) {
if ($.isArray(elements)) {
$.each(elements, function(i, element) {
slides.push($(element));
});
}
else {
$(elements).each(function(i, element) {
slides.push($(element));
});
}
};
var bindKeyEvents = function() {
var editables = [
'input',
'textarea',
'select',
'button',
'meter',
'progress',
'[contentEditable]'
].join(', ');
$document.unbind('keydown.deck').bind('keydown.deck', function(event) {
if (event.altKey) {
// ignore events when the ALT key is down
// NB: browsers use ALT+arrow to navigate history
return;
}
var isNext = event.which === options.keys.next;
var isPrev = event.which === options.keys.previous;
isNext = isNext || $.inArray(event.which, options.keys.next) > -1;
isPrev = isPrev || $.inArray(event.which, options.keys.previous) > -1;
if (isNext) {
methods.next();
event.preventDefault();
}
else if (isPrev) {
methods.prev();
event.preventDefault();
}
});
$document.undelegate(editables, 'keydown.deck', stopPropagation);
$document.delegate(editables, 'keydown.deck', stopPropagation);
};
var bindTouchEvents = function() {
var startTouch;
var direction = options.touch.swipeDirection;
var tolerance = options.touch.swipeTolerance;
var listenToHorizontal = ({ both: true, horizontal: true })[direction];
var listenToVertical = ({ both: true, vertical: true })[direction];
$container.unbind('touchstart.deck');
$container.bind('touchstart.deck', function(event) {
if (!startTouch) {
startTouch = $.extend({}, event.originalEvent.targetTouches[0]);
}
});
$container.unbind('touchmove.deck');
$container.bind('touchmove.deck', function(event) {
$.each(event.originalEvent.changedTouches, function(i, touch) {
if (!startTouch || touch.identifier !== startTouch.identifier) {
return true;
}
var xDistance = touch.screenX - startTouch.screenX;
var yDistance = touch.screenY - startTouch.screenY;
var leftToRight = xDistance > tolerance && listenToHorizontal;
var rightToLeft = xDistance < -tolerance && listenToHorizontal;
var topToBottom = yDistance > tolerance && listenToVertical;
var bottomToTop = yDistance < -tolerance && listenToVertical;
if (leftToRight || topToBottom) {
$.deck('prev');
startTouch = undefined;
}
else if (rightToLeft || bottomToTop) {
$.deck('next');
startTouch = undefined;
}
return false;
});
if (listenToVertical) {
event.preventDefault();
}
});
$container.unbind('touchend.deck');
$container.bind('touchend.deck', function(event) {
$.each(event.originalEvent.changedTouches, function(i, touch) {
if (startTouch && touch.identifier === startTouch.identifier) {
startTouch = undefined;
}
});
});
};
var indexInBounds = function(index) {
return typeof index === 'number' && index >=0 && index < slides.length;
};
var createBeforeInitEvent = function() {
var event = $.Event(events.beforeInitialize);
event.locks = 0;
event.done = $.noop;
event.lockInit = function() {
++event.locks;
};
event.releaseInit = function() {
--event.locks;
if (!event.locks) {
event.done();
}
};
return event;
};
var goByHash = function(str) {
var id = str.substr(str.indexOf("#") + 1);
$.each(slides, function(i, $slide) {
if ($slide.attr('id') === id) {
$.deck('go', i);
return false;
}
});
// If we don't set these to 0 the container scrolls due to hashchange
if (options.preventFragmentScroll) {
$.deck('getContainer').scrollLeft(0).scrollTop(0);
}
};
var assignSlideId = function(i, $slide) {
var currentId = $slide.attr('id');
var previouslyAssigned = $slide.data('deckAssignedId') === currentId;
if (!currentId || previouslyAssigned) {
$slide.attr('id', options.hashPrefix + i);
$slide.data('deckAssignedId', options.hashPrefix + i);
}
};
var removeContainerHashClass = function(id) {
$container.removeClass(options.classes.onPrefix + id);
};
var addContainerHashClass = function(id) {
$container.addClass(options.classes.onPrefix + id);
};
var setupHashBehaviors = function() {
$fragmentLinks = $();
$.each(slides, function(i, $slide) {
var hash;
assignSlideId(i, $slide);
hash = '#' + $slide.attr('id');
if (hash === window.location.hash) {
setTimeout(function() {
$.deck('go', i);
}, 1);
}
$fragmentLinks = $fragmentLinks.add('a[href="' + hash + '"]');
});
if (slides.length) {
addContainerHashClass($.deck('getSlide').attr('id'));
};
};
var changeHash = function(from, to) {
var hash = '#' + $.deck('getSlide', to).attr('id');
var hashPath = window.location.href.replace(/#.*/, '') + hash;
removeContainerHashClass($.deck('getSlide', from).attr('id'));
addContainerHashClass($.deck('getSlide', to).attr('id'));
if (Modernizr.history) {
window.history.replaceState({}, "", hashPath);
}
};
/* Methods exposed in the jQuery.deck namespace */
var methods = {
/*
jQuery.deck(selector, options)
selector: string | jQuery | array
options: object, optional
Initializes the deck, using each element matched by selector as a slide.
May also be passed an array of string selectors or jQuery objects, in
which case each selector in the array is considered a slide. The second
parameter is an optional options object which will extend the default
values.
Users may also pass only an options object to init. In this case the slide
selector will be options.selectors.slides which defaults to .slide.
$.deck('.slide');
or
$.deck([
'#first-slide',
'#second-slide',
'#etc'
]);
*/
init: function(opts) {
var beforeInitEvent = createBeforeInitEvent();
var overrides = opts;
if (!$.isPlainObject(opts)) {
overrides = arguments[1] || {};
$.extend(true, overrides, {
selectors: {
slides: arguments[0]
}
});
}
options = $.extend(true, {}, $.deck.defaults, overrides);
slides = [];
currentIndex = 0;
$container = $(options.selectors.container);
// Hide the deck while states are being applied to kill transitions
$container.addClass(options.classes.loading);
// populate the array of slides for pre-init
initSlidesArray(options.selectors.slides);
// Pre init event for preprocessing hooks
beforeInitEvent.done = function() {
// re-populate the array of slides
slides = [];
initSlidesArray(options.selectors.slides);
setupHashBehaviors();
bindKeyEvents();
bindTouchEvents();
$container.scrollLeft(0).scrollTop(0);
if (slides.length) {
updateStates();
}
// Show deck again now that slides are in place
$container.removeClass(options.classes.loading);
$document.trigger(events.initialize);
};
$document.trigger(beforeInitEvent);
if (!beforeInitEvent.locks) {
beforeInitEvent.done();
}
window.setTimeout(function() {
if (beforeInitEvent.locks) {
if (window.console) {
window.console.warn('Something locked deck initialization\
without releasing it before the timeout. Proceeding with\
initialization anyway.');
}
beforeInitEvent.done();
}
}, options.initLockTimeout);
},
/*
jQuery.deck('go', index)
index: integer | string
Moves to the slide at the specified index if index is a number. Index is
0-based, so $.deck('go', 0); will move to the first slide. If index is a
string this will move to the slide with the specified id. If index is out
of bounds or doesn't match a slide id the call is ignored.
*/
go: function(indexOrId) {
var beforeChangeEvent = $.Event(events.beforeChange);
var index;
/* Number index, easy. */
if (indexInBounds(indexOrId)) {
index = indexOrId;
}
/* Id string index, search for it and set integer index */
else if (typeof indexOrId === 'string') {
$.each(slides, function(i, $slide) {
if ($slide.attr('id') === indexOrId) {
index = i;
return false;
}
});
}
if (typeof index === 'undefined') {
return;
}
/* Trigger beforeChange. If nothing prevents the change, trigger
the slide change. */
$document.trigger(beforeChangeEvent, [currentIndex, index]);
if (!beforeChangeEvent.isDefaultPrevented()) {
$document.trigger(events.change, [currentIndex, index]);
changeHash(currentIndex, index);
currentIndex = index;
updateStates();
}
},
/*
jQuery.deck('next')
Moves to the next slide. If the last slide is already active, the call
is ignored.
*/
next: function() {
methods.go(currentIndex+1);
},
/*
jQuery.deck('prev')
Moves to the previous slide. If the first slide is already active, the
call is ignored.
*/
prev: function() {
methods.go(currentIndex-1);
},
/*
jQuery.deck('getSlide', index)
index: integer, optional
Returns a jQuery object containing the slide at index. If index is not
specified, the current slide is returned.
*/
getSlide: function(index) {
index = typeof index !== 'undefined' ? index : currentIndex;
if (!indexInBounds(index)) {
return null;
}
return slides[index];
},
/*
jQuery.deck('getSlides')
Returns all slides as an array of jQuery objects.
*/
getSlides: function() {
return slides;
},
/*
jQuery.deck('getTopLevelSlides')
Returns all slides that are not subslides.
*/
getTopLevelSlides: function() {
var topLevelSlides = [];
var slideSelector = options.selectors.slides;
var subSelector = [slideSelector, slideSelector].join(' ');
$.each(slides, function(i, $slide) {
if (!$slide.is(subSelector)) {
topLevelSlides.push($slide);
}
});
return topLevelSlides;
},
/*
jQuery.deck('getNestedSlides', index)
index: integer, optional
Returns all the nested slides of the current slide. If index is
specified it returns the nested slides of the slide at that index.
If there are no nested slides this will return an empty array.
*/
getNestedSlides: function(index) {
var targetIndex = index == null ? currentIndex : index;
var $targetSlide = $.deck('getSlide', targetIndex);
var $nesteds = $targetSlide.find(options.selectors.slides);
var nesteds = $nesteds.get();
return $.map(nesteds, function(slide, i) {
return $(slide);
});
},
/*
jQuery.deck('getContainer')
Returns a jQuery object containing the deck container as defined by the
container option.
*/
getContainer: function() {
return $container;
},
/*
jQuery.deck('getOptions')
Returns the options object for the deck, including any overrides that
were defined at initialization.
*/
getOptions: function() {
return options;
},
/*
jQuery.deck('extend', name, method)
name: string
method: function
Adds method to the deck namespace with the key of name. This doesn’t
give access to any private member data — public methods must still be
used within method — but lets extension authors piggyback on the deck
namespace rather than pollute jQuery.
$.deck('extend', 'alert', function(msg) {
alert(msg);
});
// Alerts 'boom'
$.deck('alert', 'boom');
*/
extend: function(name, method) {
methods[name] = method;
}
};
/* jQuery extension */
$.deck = function(method, arg) {
var args = Array.prototype.slice.call(arguments, 1);
if (methods[method]) {
return methods[method].apply(this, args);
}
else {
return methods.init(method, arg);
}
};
/*
The default settings object for a deck. All deck extensions should extend
this object to add defaults for any of their options.
options.classes.after
This class is added to all slides that appear after the 'next' slide.
options.classes.before
This class is added to all slides that appear before the 'previous'
slide.
options.classes.childCurrent
This class is added to all elements in the DOM tree between the
'current' slide and the deck container. For standard slides, this is
mostly seen and used for nested slides.
options.classes.current
This class is added to the current slide.
options.classes.loading
This class is applied to the deck container during loading phases and is
primarily used as a way to short circuit transitions between states
where such transitions are distracting or unwanted. For example, this
class is applied during deck initialization and then removed to prevent
all the slides from appearing stacked and transitioning into place
on load.
options.classes.next
This class is added to the slide immediately following the 'current'
slide.
options.classes.onPrefix
This prefix, concatenated with the current slide index, is added to the
deck container as you change slides.
options.classes.previous
This class is added to the slide immediately preceding the 'current'
slide.
options.selectors.container
Elements matched by this CSS selector will be considered the deck
container. The deck container is used to scope certain states of the
deck, as with the onPrefix option, or with extensions such as deck.goto
and deck.menu.
options.selectors.slides
Elements matched by this selector make up the individual deck slides.
If a user chooses to pass the slide selector as the first argument to
$.deck() on initialization it does the same thing as passing in this
option and this option value will be set to the value of that parameter.
options.keys.next
The numeric keycode used to go to the next slide.
options.keys.previous
The numeric keycode used to go to the previous slide.
options.touch.swipeDirection
The direction swipes occur to cause slide changes. Can be 'horizontal',
'vertical', or 'both'. Any other value or a falsy value will disable
swipe gestures for navigation.
options.touch.swipeTolerance
The number of pixels the users finger must travel to produce a swipe
gesture.
options.initLockTimeout
The number of milliseconds the init event will wait for BeforeInit event
locks to be released before firing the init event regardless.
options.hashPrefix
Every slide that does not have an id is assigned one at initialization.
Assigned ids take the form of hashPrefix + slideIndex, e.g., slide-0,
slide-12, etc.
options.preventFragmentScroll
When deep linking to a hash of a nested slide, this scrolls the deck
container to the top, undoing the natural browser behavior of scrolling
to the document fragment on load.
options.setAriaHiddens
When set to true, deck.js will set aria hidden attributes for slides
that do not appear onscreen according to a typical heirarchical
deck structure. You may want to turn this off if you are using a theme
where slides besides the current slide are visible on screen and should
be accessible to screenreaders.
*/
/* Default values for every deck.js option (each is documented in the
   comment block above). Any of these may be overridden by passing an
   options object to $.deck() at initialization. */
$.deck.defaults = {
  // CSS classes toggled on slides and the container to reflect deck state.
  classes: {
    after: 'deck-after',
    before: 'deck-before',
    childCurrent: 'deck-child-current',
    current: 'deck-current',
    loading: 'deck-loading',
    next: 'deck-next',
    onPrefix: 'on-slide-',
    previous: 'deck-previous'
  },
  // Selectors locating the deck container and the individual slides.
  selectors: {
    container: '.deck-container',
    slides: '.slide'
  },
  // Keycodes bound to slide navigation.
  keys: {
    // enter, space, page down, right arrow, down arrow,
    next: [13, 32, 34, 39, 40],
    // backspace, page up, left arrow, up arrow
    previous: [8, 33, 37, 38]
  },
  // Touch/swipe navigation settings.
  touch: {
    swipeDirection: 'horizontal',
    // Pixels a finger must travel for the gesture to count as a swipe.
    swipeTolerance: 60
  },
  // Milliseconds to wait on BeforeInit locks before firing init regardless.
  initLockTimeout: 10000,
  // Prefix used when auto-assigning ids to slides (e.g. slide-0, slide-12).
  hashPrefix: 'slide-',
  // Undo the browser's scroll-to-fragment behavior when deep linking.
  preventFragmentScroll: true,
  // Set aria-hidden attributes on slides that are off screen.
  setAriaHiddens: true
};
// Mark the document as ready so stylesheets can distinguish the
// pre-load and post-load states of the deck.
$document.ready(function() {
  $('html').addClass('ready');
});
// Keep the deck in sync with the URL hash (back/forward navigation and
// manual hash edits). Prefer the event's newURL when the browser supplies
// it; otherwise fall back to reading window.location.hash directly.
$window.bind('hashchange.deck', function(event) {
  if (event.originalEvent && event.originalEvent.newURL) {
    goByHash(event.originalEvent.newURL);
  }
  else {
    goByHash(window.location.hash);
  }
});
// On page load, scroll the deck container back to the origin, undoing
// the browser's natural scroll to the document fragment (controlled by
// options.preventFragmentScroll).
$window.bind('load.deck', function() {
  if (options.preventFragmentScroll) {
    $container.scrollLeft(0).scrollTop(0);
  }
});
})(jQuery); | /rst2html5-tools-0.5.3.tar.gz/rst2html5-tools-0.5.3/html5css3/thirdparty/deckjs/core/deck.core.js | 0.50952 | 0.504944 | deck.core.js | pypi |
# reveal.js [](https://travis-ci.org/hakimel/reveal.js)
A framework for easily creating beautiful presentations using HTML. [Check out the live demo](http://lab.hakim.se/reveal-js/).
reveal.js comes with a broad range of features including [nested slides](https://github.com/hakimel/reveal.js#markup), [markdown contents](https://github.com/hakimel/reveal.js#markdown), [PDF export](https://github.com/hakimel/reveal.js#pdf-export), [speaker notes](https://github.com/hakimel/reveal.js#speaker-notes) and a [JavaScript API](https://github.com/hakimel/reveal.js#api). It's best viewed in a browser with support for CSS 3D transforms but [fallbacks](https://github.com/hakimel/reveal.js/wiki/Browser-Support) are available to make sure your presentation can still be viewed elsewhere.
#### More reading:
- [Installation](#installation): Step-by-step instructions for getting reveal.js running on your computer.
- [Changelog](https://github.com/hakimel/reveal.js/releases): Up-to-date version history.
- [Examples](https://github.com/hakimel/reveal.js/wiki/Example-Presentations): Presentations created with reveal.js, add your own!
- [Browser Support](https://github.com/hakimel/reveal.js/wiki/Browser-Support): Explanation of browser support and fallbacks.
## Online Editor
Presentations are written using HTML or markdown but there's also an online editor for those of you who prefer a graphical interface. Give it a try at [http://slid.es](http://slid.es).
## Instructions
### Markup
Markup hierarchy needs to be ``<div class="reveal"> <div class="slides"> <section>`` where the ``<section>`` represents one slide and can be repeated indefinitely. If you place multiple ``<section>``'s inside of another ``<section>`` they will be shown as vertical slides. The first of the vertical slides is the "root" of the others (at the top), and it will be included in the horizontal sequence. For example:
```html
<div class="reveal">
<div class="slides">
<section>Single Horizontal Slide</section>
<section>
<section>Vertical Slide 1</section>
<section>Vertical Slide 2</section>
</section>
</div>
</div>
```
### Markdown
It's possible to write your slides using Markdown. To enable Markdown, add the ```data-markdown``` attribute to your ```<section>``` elements and wrap the contents in a ```<script type="text/template">``` like the example below.
This is based on [data-markdown](https://gist.github.com/1343518) from [Paul Irish](https://github.com/paulirish) modified to use [marked](https://github.com/chjj/marked) to support [Github Flavoured Markdown](https://help.github.com/articles/github-flavored-markdown). Sensitive to indentation (avoid mixing tabs and spaces) and line breaks (avoid consecutive breaks).
```html
<section data-markdown>
<script type="text/template">
## Page title
A paragraph with some text and a [link](http://hakim.se).
</script>
</section>
```
#### External Markdown
You can write your content as a separate file and have reveal.js load it at runtime. Note the separator arguments which determine how slides are delimited in the external file. The ```data-charset``` attribute is optional and specifies which charset to use when loading the external file.
When used locally, this feature requires that reveal.js [runs from a local web server](#full-setup).
```html
<section data-markdown="example.md"
data-separator="^\n\n\n"
data-vertical="^\n\n"
data-notes="^Note:"
data-charset="iso-8859-15">
</section>
```
#### Element Attributes
Special syntax (in html comment) is available for adding attributes to Markdown elements. This is useful for fragments, amongst other things.
```html
<section data-markdown>
<script type="text/template">
- Item 1 <!-- .element: class="fragment" data-fragment-index="2" -->
- Item 2 <!-- .element: class="fragment" data-fragment-index="1" -->
</script>
</section>
```
#### Slide Attributes
Special syntax (in html comment) is available for adding attributes to the slide `<section>` elements generated by your Markdown.
```html
<section data-markdown>
<script type="text/template">
<!-- .slide: data-background="#ff0000" -->
Markdown content
</script>
</section>
```
### Configuration
At the end of your page you need to initialize reveal by running the following code. Note that all config values are optional and will default as specified below.
```javascript
Reveal.initialize({
// Display controls in the bottom right corner
controls: true,
// Display a presentation progress bar
progress: true,
// Display the page number of the current slide
slideNumber: false,
// Push each slide change to the browser history
history: false,
// Enable keyboard shortcuts for navigation
keyboard: true,
// Enable the slide overview mode
overview: true,
// Vertical centering of slides
center: true,
// Enables touch navigation on devices with touch input
touch: true,
// Loop the presentation
loop: false,
// Change the presentation direction to be RTL
rtl: false,
// Turns fragments on and off globally
fragments: true,
// Flags if the presentation is running in an embedded mode,
// i.e. contained within a limited portion of the screen
embedded: false,
// Number of milliseconds between automatically proceeding to the
// next slide, disabled when set to 0, this value can be overwritten
// by using a data-autoslide attribute on your slides
autoSlide: 0,
// Stop auto-sliding after user input
autoSlideStoppable: true,
// Enable slide navigation via mouse wheel
mouseWheel: false,
// Hides the address bar on mobile devices
hideAddressBar: true,
// Opens links in an iframe preview overlay
previewLinks: false,
// Transition style
transition: 'default', // default/cube/page/concave/zoom/linear/fade/none
// Transition speed
transitionSpeed: 'default', // default/fast/slow
// Transition style for full page slide backgrounds
backgroundTransition: 'default', // default/none/slide/concave/convex/zoom
// Number of slides away from the current that are visible
viewDistance: 3,
// Parallax background image
parallaxBackgroundImage: '', // e.g. "'https://s3.amazonaws.com/hakim-static/reveal-js/reveal-parallax-1.jpg'"
// Parallax background size
parallaxBackgroundSize: '' // CSS syntax, e.g. "2100px 900px"
});
```
Note that the new default vertical centering option will break compatibility with slides that were using transitions with backgrounds (`cube` and `page`). To restore the previous behavior, set `center` to `false`.
The configuration can be updated after initialization using the ```configure``` method:
```javascript
// Turn autoSlide off
Reveal.configure({ autoSlide: 0 });
// Start auto-sliding every 5s
Reveal.configure({ autoSlide: 5000 });
```
### Dependencies
Reveal.js doesn't _rely_ on any third party scripts to work but a few optional libraries are included by default. These libraries are loaded as dependencies in the order they appear, for example:
```javascript
Reveal.initialize({
dependencies: [
// Cross-browser shim that fully implements classList - https://github.com/eligrey/classList.js/
{ src: 'lib/js/classList.js', condition: function() { return !document.body.classList; } },
// Interpret Markdown in <section> elements
{ src: 'plugin/markdown/marked.js', condition: function() { return !!document.querySelector( '[data-markdown]' ); } },
{ src: 'plugin/markdown/markdown.js', condition: function() { return !!document.querySelector( '[data-markdown]' ); } },
// Syntax highlight for <code> elements
{ src: 'plugin/highlight/highlight.js', async: true, callback: function() { hljs.initHighlightingOnLoad(); } },
// Zoom in and out with Alt+click
{ src: 'plugin/zoom-js/zoom.js', async: true, condition: function() { return !!document.body.classList; } },
// Speaker notes
{ src: 'plugin/notes/notes.js', async: true, condition: function() { return !!document.body.classList; } },
// Remote control your reveal.js presentation using a touch device
{ src: 'plugin/remotes/remotes.js', async: true, condition: function() { return !!document.body.classList; } },
// MathJax
{ src: 'plugin/math/math.js', async: true }
]
});
```
You can add your own extensions using the same syntax. The following properties are available for each dependency object:
- **src**: Path to the script to load
- **async**: [optional] Flags if the script should load after reveal.js has started, defaults to false
- **callback**: [optional] Function to execute when the script has loaded
- **condition**: [optional] Function which must return true for the script to be loaded
### Presentation Size
All presentations have a normal size, that is the resolution at which they are authored. The framework will automatically scale presentations uniformly based on this size to ensure that everything fits on any given display or viewport.
See below for a list of configuration options related to sizing, including default values:
```javascript
Reveal.initialize({
...
// The "normal" size of the presentation, aspect ratio will be preserved
// when the presentation is scaled to fit different resolutions. Can be
// specified using percentage units.
width: 960,
height: 700,
// Factor of the display size that should remain empty around the content
margin: 0.1,
// Bounds for smallest/largest possible scale to apply to content
minScale: 0.2,
maxScale: 1.0
});
```
### Auto-sliding
Presentations can be configured to progress through slides automatically, without any user input. To enable this you will need to tell the framework how many milliseconds it should wait between slides:
```javascript
// Slide every five seconds
Reveal.configure({
autoSlide: 5000
});
```
When this is turned on a control element will appear that enables users to pause and resume auto-sliding. Sliding is also paused automatically as soon as the user starts navigating. You can disable these controls by specifying ```autoSlideStoppable: false``` in your reveal.js config.
You can also override the slide duration for individual slides by using the ```data-autoslide``` attribute on individual sections:
```html
<section data-autoslide="10000">This will remain on screen for 10 seconds</section>
```
### Keyboard Bindings
If you're unhappy with any of the default keyboard bindings you can override them using the ```keyboard``` config option:
```javascript
Reveal.configure({
keyboard: {
13: 'next', // go to the next slide when the ENTER key is pressed
27: function() {}, // do something custom when ESC is pressed
32: null // don't do anything when SPACE is pressed (i.e. disable a reveal.js default binding)
}
});
```
### API
The ``Reveal`` class provides a JavaScript API for controlling navigation and reading state:
```javascript
// Navigation
Reveal.slide( indexh, indexv, indexf );
Reveal.left();
Reveal.right();
Reveal.up();
Reveal.down();
Reveal.prev();
Reveal.next();
Reveal.prevFragment();
Reveal.nextFragment();
Reveal.toggleOverview();
Reveal.togglePause();
// Retrieves the previous and current slide elements
Reveal.getPreviousSlide();
Reveal.getCurrentSlide();
Reveal.getIndices(); // { h: 0, v: 0 } }
// State checks
Reveal.isFirstSlide();
Reveal.isLastSlide();
Reveal.isOverview();
Reveal.isPaused();
```
### Ready Event
The 'ready' event is fired when reveal.js has loaded all (synchronous) dependencies and is ready to start navigating.
```javascript
Reveal.addEventListener( 'ready', function( event ) {
// event.currentSlide, event.indexh, event.indexv
} );
```
### Slide Changed Event
A 'slidechanged' event is fired each time the slide is changed (regardless of state). The event object holds the index values of the current slide as well as a reference to the previous and current slide HTML nodes.
Some libraries, like MathJax (see [#226](https://github.com/hakimel/reveal.js/issues/226#issuecomment-10261609)), get confused by the transforms and display states of slides. Oftentimes, this can be fixed by calling their update or render function from this callback.
```javascript
Reveal.addEventListener( 'slidechanged', function( event ) {
// event.previousSlide, event.currentSlide, event.indexh, event.indexv
} );
```
### States
If you set ``data-state="somestate"`` on a slide ``<section>``, "somestate" will be applied as a class on the document element when that slide is opened. This allows you to apply broad style changes to the page based on the active slide.
Furthermore you can also listen to these changes in state via JavaScript:
```javascript
Reveal.addEventListener( 'somestate', function() {
// TODO: Sprinkle magic
}, false );
```
### Slide Backgrounds
Slides are contained within a limited portion of the screen by default to allow them to fit any display and scale uniformly. You can apply full page background colors or images by applying a ```data-background``` attribute to your ```<section>``` elements. Below are a few examples.
```html
<section data-background="#ff0000">
<h2>All CSS color formats are supported, like rgba() or hsl().</h2>
</section>
<section data-background="http://example.com/image.png">
<h2>This slide will have a full-size background image.</h2>
</section>
<section data-background="http://example.com/image.png" data-background-size="100px" data-background-repeat="repeat">
<h2>This background image will be sized to 100px and repeated.</h2>
</section>
```
Backgrounds transition using a fade animation by default. This can be changed to a linear sliding transition by passing ```backgroundTransition: 'slide'``` to the ```Reveal.initialize()``` call. Alternatively you can set ```data-background-transition``` on any section with a background to override that specific transition.
### Parallax Background
If you want to use a parallax scrolling background, set the two following config properties when initializing reveal.js (the third one is optional).
```javascript
Reveal.initialize({
// Parallax background image
parallaxBackgroundImage: '', // e.g. "https://s3.amazonaws.com/hakim-static/reveal-js/reveal-parallax-1.jpg"
// Parallax background size
parallaxBackgroundSize: '', // CSS syntax, e.g. "2100px 900px" - currently only pixels are supported (don't use % or auto)
// This slide transition gives best results:
transition: 'linear'
});
```
Make sure that the background size is much bigger than screen size to allow for some scrolling. [View example](http://lab.hakim.se/reveal-js/?parallaxBackgroundImage=https%3A%2F%2Fs3.amazonaws.com%2Fhakim-static%2Freveal-js%2Freveal-parallax-1.jpg&parallaxBackgroundSize=2100px%20900px).
### Slide Transitions
The global presentation transition is set using the ```transition``` config value. You can override the global transition for a specific slide by using the ```data-transition``` attribute:
```html
<section data-transition="zoom">
<h2>This slide will override the presentation transition and zoom!</h2>
</section>
<section data-transition-speed="fast">
<h2>Choose from three transition speeds: default, fast or slow!</h2>
</section>
```
Note that this does not work with the page and cube transitions.
### Internal links
It's easy to link between slides. The first example below targets the index of another slide whereas the second targets a slide with an ID attribute (```<section id="some-slide">```):
```html
<a href="#/2/2">Link</a>
<a href="#/some-slide">Link</a>
```
You can also add relative navigation links, similar to the built in reveal.js controls, by appending one of the following classes on any element. Note that each element is automatically given an ```enabled``` class when it's a valid navigation route based on the current slide.
```html
<a href="#" class="navigate-left">
<a href="#" class="navigate-right">
<a href="#" class="navigate-up">
<a href="#" class="navigate-down">
<a href="#" class="navigate-prev"> <!-- Previous vertical or horizontal slide -->
<a href="#" class="navigate-next"> <!-- Next vertical or horizontal slide -->
```
### Fragments
Fragments are used to highlight individual elements on a slide. Every element with the class ```fragment``` will be stepped through before moving on to the next slide. Here's an example: http://lab.hakim.se/reveal-js/#/fragments
The default fragment style is to start out invisible and fade in. This style can be changed by appending a different class to the fragment:
```html
<section>
<p class="fragment grow">grow</p>
<p class="fragment shrink">shrink</p>
<p class="fragment roll-in">roll-in</p>
<p class="fragment fade-out">fade-out</p>
<p class="fragment current-visible">visible only once</p>
<p class="fragment highlight-current-blue">blue only once</p>
<p class="fragment highlight-red">highlight-red</p>
<p class="fragment highlight-green">highlight-green</p>
<p class="fragment highlight-blue">highlight-blue</p>
</section>
```
Multiple fragments can be applied to the same element sequentially by wrapping it, this will fade in the text on the first step and fade it back out on the second.
```html
<section>
<span class="fragment fade-in">
<span class="fragment fade-out">I'll fade in, then out</span>
</span>
</section>
```
The display order of fragments can be controlled using the ```data-fragment-index``` attribute.
```html
<section>
<p class="fragment" data-fragment-index="3">Appears last</p>
<p class="fragment" data-fragment-index="1">Appears first</p>
<p class="fragment" data-fragment-index="2">Appears second</p>
</section>
```
### Fragment events
When a slide fragment is either shown or hidden reveal.js will dispatch an event.
Some libraries, like MathJax (see #505), get confused by the initially hidden fragment elements. Oftentimes this can be fixed by calling their update or render function from this callback.
```javascript
Reveal.addEventListener( 'fragmentshown', function( event ) {
// event.fragment = the fragment DOM element
} );
Reveal.addEventListener( 'fragmenthidden', function( event ) {
// event.fragment = the fragment DOM element
} );
```
### Code syntax highlighting
By default, Reveal is configured with [highlight.js](http://softwaremaniacs.org/soft/highlight/en/) for code syntax highlighting. Below is an example with clojure code that will be syntax highlighted. When the `data-trim` attribute is present surrounding whitespace is automatically removed.
```html
<section>
<pre><code data-trim>
(def lazy-fib
(concat
[0 1]
((fn rfib [a b]
(lazy-cons (+ a b) (rfib b (+ a b)))) 0 1)))
</code></pre>
</section>
```
### Slide number
If you would like to display the page number of the current slide you can do so using the ```slideNumber``` configuration value.
```javascript
Reveal.configure({ slideNumber: true });
```
### Overview mode
Press "Esc" or "o" keys to toggle the overview mode on and off. While you're in this mode, you can still navigate between slides,
as if you were at 1,000 feet above your presentation. The overview mode comes with a few API hooks:
```javascript
Reveal.addEventListener( 'overviewshown', function( event ) { /* ... */ } );
Reveal.addEventListener( 'overviewhidden', function( event ) { /* ... */ } );
// Toggle the overview mode programmatically
Reveal.toggleOverview();
```
### Fullscreen mode
Just press »F« on your keyboard to show your presentation in fullscreen mode. Press the »ESC« key to exit fullscreen mode.
### Embedded media
Embedded HTML5 `<video>`/`<audio>` and YouTube iframes are automatically paused when you navigate away from a slide. This can be disabled by decorating your element with a `data-ignore` attribute.
Add `data-autoplay` to your media element if you want it to automatically start playing when the slide is shown:
```html
<video data-autoplay src="http://clips.vorwaerts-gmbh.de/big_buck_bunny.mp4"></video>
```
Additionally the framework automatically pushes two [post messages](https://developer.mozilla.org/en-US/docs/Web/API/Window.postMessage) to all iframes, ```slide:start``` when the slide containing the iframe is made visible and ```slide:stop``` when it is hidden.
### Stretching elements
Sometimes it's desirable to have an element, like an image or video, stretch to consume as much space as possible within a given slide. This can be done by adding the ```.stretch``` class to an element as seen below:
```html
<section>
<h2>This video will use up the remaining space on the slide</h2>
<video class="stretch" src="http://clips.vorwaerts-gmbh.de/big_buck_bunny.mp4"></video>
</section>
```
Limitations:
- Only direct descendants of a slide section can be stretched
- Only one descendant per slide section can be stretched
## PDF Export
Presentations can be exported to PDF via a special print stylesheet. This feature requires that you use [Google Chrome](http://google.com/chrome).
Here's an example of an exported presentation that's been uploaded to SlideShare: http://www.slideshare.net/hakimel/revealjs-13872948.
1. Open your presentation with [css/print/pdf.css](https://github.com/hakimel/reveal.js/blob/master/css/print/pdf.css) included on the page. The default index HTML lets you add *print-pdf* anywhere in the query to include the stylesheet, for example: [lab.hakim.se/reveal-js?print-pdf](http://lab.hakim.se/reveal-js?print-pdf).
2. Open the in-browser print dialog (CMD+P).
3. Change the **Destination** setting to **Save as PDF**.
4. Change the **Layout** to **Landscape**.
5. Change the **Margins** to **None**.
6. Click **Save**.

## Theming
The framework comes with a few different themes included:
- default: Gray background, white text, blue links
- beige: Beige background, dark text, brown links
- sky: Blue background, thin white text, blue links
- night: Black background, thick white text, orange links
- serif: Cappuccino background, gray text, brown links
- simple: White background, black text, blue links
- solarized: Cream-colored background, dark green text, blue links
Each theme is available as a separate stylesheet. To change theme you will need to replace **default** below with your desired theme name in index.html:
```html
<link rel="stylesheet" href="css/theme/default.css" id="theme">
```
If you want to add a theme of your own see the instructions here: [/css/theme/README.md](https://github.com/hakimel/reveal.js/blob/master/css/theme/README.md).
## Speaker Notes
reveal.js comes with a speaker notes plugin which can be used to present per-slide notes in a separate browser window. The notes window also gives you a preview of the next upcoming slide so it may be helpful even if you haven't written any notes. Press the 's' key on your keyboard to open the notes window.
Notes are defined by appending an ```<aside>``` element to a slide as seen below. You can add the ```data-markdown``` attribute to the aside element if you prefer writing notes using Markdown.
When used locally, this feature requires that reveal.js [runs from a local web server](#full-setup).
```html
<section>
<h2>Some Slide</h2>
<aside class="notes">
Oh hey, these are some notes. They'll be hidden in your presentation, but you can see them if you open the speaker notes window (hit 's' on your keyboard).
</aside>
</section>
```
If you're using the external Markdown plugin, you can add notes with the help of a special delimiter:
```html
<section data-markdown="example.md" data-separator="^\n\n\n" data-vertical="^\n\n" data-notes="^Note:"></section>
# Title
## Sub-title
Here is some content...
Note:
This will only display in the notes window.
```
## Server Side Speaker Notes
In some cases it can be desirable to run notes on a separate device from the one you're presenting on. The Node.js-based notes plugin lets you do this using the same note definitions as its client side counterpart. Include the required scripts by adding the following dependencies:
```javascript
Reveal.initialize({
...
dependencies: [
{ src: 'socket.io/socket.io.js', async: true },
{ src: 'plugin/notes-server/client.js', async: true }
]
});
```
Then:
1. Install [Node.js](http://nodejs.org/)
2. Run ```npm install```
3. Run ```node plugin/notes-server```
## Multiplexing
The multiplex plugin allows your audience to view the slides of the presentation you are controlling on their own phone, tablet or laptop. As the master presentation navigates the slides, all client presentations will update in real time. See a demo at [http://revealjs.jit.su/](http://revealjs.jit.su).
The multiplex plugin needs the following 3 things to operate:
1. Master presentation that has control
2. Client presentations that follow the master
3. Socket.io server to broadcast events from the master to the clients
More details:
#### Master presentation
Served from a static file server accessible (preferably) only to the presenter. This need only be on your (the presenter's) computer. (It's safer to run the master presentation from your own computer, so if the venue's Internet goes down it doesn't stop the show.) An example would be to execute the following commands in the directory of your master presentation:
1. ```npm install node-static```
2. ```static```
If you want to use the speaker notes plugin with your master presentation then make sure you have the speaker notes plugin configured correctly along with the configuration shown below, then execute ```node plugin/notes-server``` in the directory of your master presentation. The configuration below will cause it to connect to the socket.io server as a master, as well as launch your speaker-notes/static-file server.
You can then access your master presentation at ```http://localhost:1947```
Example configuration:
```javascript
Reveal.initialize({
// other options...
multiplex: {
// Example values. To generate your own, see the socket.io server instructions.
secret: '13652805320794272084', // Obtained from the socket.io server. Gives this (the master) control of the presentation
id: '1ea875674b17ca76', // Obtained from socket.io server
url: 'revealjs.jit.su:80' // Location of socket.io server
},
// Don't forget to add the dependencies
dependencies: [
{ src: '//cdnjs.cloudflare.com/ajax/libs/socket.io/0.9.10/socket.io.min.js', async: true },
{ src: 'plugin/multiplex/master.js', async: true },
// and if you want speaker notes
{ src: 'plugin/notes-server/client.js', async: true }
// other dependencies...
]
});
```
#### Client presentation
Served from a publicly accessible static file server. Examples include: GitHub Pages, Amazon S3, Dreamhost, Akamai, etc. The more reliable, the better. Your audience can then access the client presentation via ```http://example.com/path/to/presentation/client/index.html```, with the configuration below causing them to connect to the socket.io server as clients.
Example configuration:
```javascript
Reveal.initialize({
// other options...
multiplex: {
// Example values. To generate your own, see the socket.io server instructions.
secret: null, // null so the clients do not have control of the master presentation
id: '1ea875674b17ca76', // id, obtained from socket.io server
url: 'revealjs.jit.su:80' // Location of socket.io server
},
// Don't forget to add the dependencies
dependencies: [
{ src: '//cdnjs.cloudflare.com/ajax/libs/socket.io/0.9.10/socket.io.min.js', async: true },
{ src: 'plugin/multiplex/client.js', async: true }
// other dependencies...
]
});
```
#### Socket.io server
Server that receives the slideChanged events from the master presentation and broadcasts them out to the connected client presentations. This needs to be publicly accessible. You can run your own socket.io server with the commands:
1. ```npm install```
2. ```node plugin/multiplex```
Or you use the socket.io server at [http://revealjs.jit.su](http://revealjs.jit.su).
You'll need to generate a unique secret and token pair for your master and client presentations. To do so, visit ```http://example.com/token```, where ```http://example.com``` is the location of your socket.io server. Or if you're going to use the socket.io server at [http://revealjs.jit.su](http://revealjs.jit.su), visit [http://revealjs.jit.su/token](http://revealjs.jit.su/token).
You are very welcome to point your presentations at the Socket.io server running at [http://revealjs.jit.su](http://revealjs.jit.su), but availability and stability are not guaranteed. For anything mission critical I recommend you run your own server. It is simple to deploy to nodejitsu, heroku, your own environment, etc.
##### socket.io server as file static server
The socket.io server can play the role of static file server for your client presentation, as in the example at [http://revealjs.jit.su](http://revealjs.jit.su). (Open [http://revealjs.jit.su](http://revealjs.jit.su) in two browsers. Navigate through the slides on one, and the other will update to match.)
Example configuration:
```javascript
Reveal.initialize({
// other options...
multiplex: {
// Example values. To generate your own, see the socket.io server instructions.
secret: null, // null so the clients do not have control of the master presentation
id: '1ea875674b17ca76', // id, obtained from socket.io server
url: 'example.com:80' // Location of your socket.io server
},
// Don't forget to add the dependencies
dependencies: [
{ src: '//cdnjs.cloudflare.com/ajax/libs/socket.io/0.9.10/socket.io.min.js', async: true },
{ src: 'plugin/multiplex/client.js', async: true }
// other dependencies...
  ]
});
```
It can also play the role of static file server for your master presentation and client presentations at the same time (as long as you don't want to use speaker notes). (Open [http://revealjs.jit.su](http://revealjs.jit.su) in two browsers. Navigate through the slides on one, and the other will update to match. Navigate through the slides on the second, and the first will update to match.) This is probably not desirable, because you don't want your audience to mess with your slides while you're presenting. ;)
Example configuration:
```javascript
Reveal.initialize({
// other options...
multiplex: {
// Example values. To generate your own, see the socket.io server instructions.
secret: '13652805320794272084', // Obtained from the socket.io server. Gives this (the master) control of the presentation
id: '1ea875674b17ca76', // Obtained from socket.io server
url: 'example.com:80' // Location of your socket.io server
},
// Don't forget to add the dependencies
dependencies: [
{ src: '//cdnjs.cloudflare.com/ajax/libs/socket.io/0.9.10/socket.io.min.js', async: true },
{ src: 'plugin/multiplex/master.js', async: true },
{ src: 'plugin/multiplex/client.js', async: true }
// other dependencies...
]
});
```
## Leap Motion
The Leap Motion plugin lets you utilize your [Leap Motion](https://www.leapmotion.com/) device to control basic navigation of your presentation. The gestures currently supported are:
##### 1 to 2 fingers
Pointer — Point to anything on screen. Move your finger past the device to expand the pointer.
##### 1 hand + 3 or more fingers (left/right/up/down)
Navigate through your slides. See config options to invert movements.
##### 2 hands upwards
Toggle the overview mode. Do it a second time to exit the overview.
#### Config Options
You can edit the following options:
| Property | Default | Description
| ----------------- |:-----------------:| :-------------
| autoCenter | true | Center the pointer based on where you put your finger into the leap motions detection field.
| gestureDelay | 500 | How long to delay between gestures in milliseconds.
| naturalSwipe | true | Swipe as though you were touching a touch screen. Set to false to invert.
| pointerColor | #00aaff | The color of the pointer.
| pointerOpacity | 0.7 | The opacity of the pointer.
| pointerSize | 15 | The minimum height and width of the pointer.
| pointerTolerance | 120 | Bigger = slower pointer.
Example configuration:
```js
Reveal.initialize({
// other options...
leap: {
naturalSwipe : false, // Invert swipe gestures
pointerOpacity : 0.5, // Set pointer opacity to 0.5
pointerColor : '#d80000' // Red pointer
},
dependencies: [
{ src: 'plugin/leap/leap.js', async: true }
]
});
```
## MathJax
If you want to display math equations in your presentation you can easily do so by including this plugin. The plugin is a very thin wrapper around the [MathJax](http://www.mathjax.org/) library. To use it you'll need to include it as a reveal.js dependency, [find out more about dependencies here](#dependencies).
The plugin defaults to using [LaTeX](http://en.wikipedia.org/wiki/LaTeX) but that can be adjusted through the ```math``` configuration object. Note that MathJax is loaded from a remote server. If you want to use it offline you'll need to download a copy of the library and adjust the ```mathjax``` configuration value.
Below is an example of how the plugin can be configured. If you don't intend to change these values you do not need to include the ```math``` config object at all.
```js
Reveal.initialize({
// other options ...
math: {
mathjax: 'http://cdn.mathjax.org/mathjax/latest/MathJax.js',
config: 'TeX-AMS_HTML-full' // See http://docs.mathjax.org/en/latest/config-files.html
},
dependencies: [
{ src: 'plugin/math/math.js', async: true }
]
});
```
Read MathJax's documentation if you need [HTTPS delivery](http://docs.mathjax.org/en/latest/start.html#secure-access-to-the-cdn) or serving of [specific versions](http://docs.mathjax.org/en/latest/configuration.html#loading-mathjax-from-the-cdn) for stability.
## Installation
The **basic setup** is for authoring presentations only. The **full setup** gives you access to all reveal.js features and plugins such as speaker notes as well as the development tasks needed to make changes to the source.
### Basic setup
The core of reveal.js is very easy to install. You'll simply need to download a copy of this repository and open the index.html file directly in your browser.
1. Download the latest version of reveal.js from <https://github.com/hakimel/reveal.js/releases>
2. Unzip and replace the example contents in index.html with your own
3. Open index.html in a browser to view it
### Full setup
Some reveal.js features, like external markdown and speaker notes, require that presentations run from a local web server. The following instructions will set up such a server as well as all of the development tasks needed to make edits to the reveal.js source code.
1. Install [Node.js](http://nodejs.org/)
2. Install [Grunt](http://gruntjs.com/getting-started#installing-the-cli)
3. Clone the reveal.js repository
```sh
$ git clone https://github.com/hakimel/reveal.js.git
```
4. Navigate to the reveal.js folder
```sh
$ cd reveal.js
```
5. Install dependencies
```sh
$ npm install
```
6. Serve the presentation and monitor source files for changes
```sh
$ grunt serve
```
7. Open <http://localhost:8000> to view your presentation
You can change the port by using `grunt serve --port 8001`.
### Folder Structure
- **css/** Core styles without which the project does not function
- **js/** Like above but for JavaScript
- **plugin/** Components that have been developed as extensions to reveal.js
- **lib/** All other third party assets (JavaScript, CSS, fonts)
### Contributing
Please keep the [issue tracker](http://github.com/hakimel/reveal.js/issues) limited to **bug reports**, **feature requests** and **pull requests**. If you are reporting a bug make sure to include information about which browser and operating system you are using as well as the necessary steps to reproduce the issue.
If you have personal support questions use [StackOverflow](http://stackoverflow.com/questions/tagged/reveal.js).
#### Pull requests
- Should follow the coding style of the file you work in, most importantly:
- Tabs to indent
- Single-quoted strings
- Should be made towards the **dev branch**
- Should be submitted from a feature/topic branch (not your master)
- Should not include the minified **reveal.min.js** file
## License
MIT licensed
Copyright (C) 2014 Hakim El Hattab, http://hakim.se
| /rst2html5-tools-0.5.3.tar.gz/rst2html5-tools-0.5.3/html5css3/thirdparty/revealjs/README.md | 0.468791 | 0.934155 | README.md | pypi |
import urllib
import urllib.parse
from docutils import frontend, nodes, writers
# sys.stdout = codecs.getwriter('shift_jis')(sys.stdout)
class Writer(writers.Writer):
    """Docutils writer front-end that emits Confluence Wiki markup.

    Delegates the actual rendering to :class:`ConfluenceTranslator`;
    metadata collected from ``meta`` directives is available on the
    translator's ``meta`` dict after :meth:`translate` runs.
    """

    # Prevent the filtering of the Meta directive.
    supported = ['html']

    settings_spec = (
        'rST-specific options',
        None,
        (
            (
                'Generate {{excerpt}} around the first paragraph',
                ['--excerpt'],
                {
                    'action': 'store_true',
                    'validator': frontend.validate_boolean
                }
            ),
        ))

    def translate(self):
        """Walk the document tree and store the rendered wiki text."""
        self.visitor = ConfluenceTranslator(self.document)
        self.visitor.meta = {}
        self.document.walkabout(self.visitor)
        # The rendered output is exactly the translator's accumulated text.
        self.output = self.visitor.astext()
class ConfluenceTranslator(nodes.NodeVisitor):
    """Write output in Confluence Wiki format.

    References:
    * ReST: http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html
    * Confluence Wiki:
      http://confluence.atlassian.com/display/DOC/Confluence+Notation+Guide+Overview

    Output is accumulated as a list of string fragments in ``self.content``
    and joined by :meth:`astext`.  ``element_level`` tracks how deeply
    nested the current node is; it must return to 0 between top-level
    elements (the excerpt logic depends on it).
    """

    # visit_/depart_ handlers that intentionally do nothing; they are
    # bound as no-op lambdas in __init__.
    empty_methods = [
        'depart_Text',
        'depart_colspec',
        'depart_decoration',
        'depart_document',
        'depart_field',
        'depart_footer',
        'depart_line_block',
        'depart_raw',
        'depart_target',
        'depart_tgroup',
        'visit_colspec',
        'visit_decoration',
        'visit_document',
        'visit_field',
        'visit_line',
        'visit_raw',
        'visit_tgroup'
    ]

    # Class-level defaults for traversal state.  The immutable ones are
    # safely shadowed by assignment during traversal; ``meta`` is mutable
    # and therefore re-created per instance in __init__.
    inCode = False
    keepLineBreaks = False
    lastTableEntryBar = 0
    docinfo = False
    generateExcerpt = False
    meta = {}

    def __init__(self, document):
        nodes.NodeVisitor.__init__(self, document)
        self.settings = document.settings
        if self.settings.excerpt:
            self.generateExcerpt = True
        # Fix: shadow the mutable class attribute so separate translator
        # instances never share (and accumulate into) the same dict.
        self.meta = {}
        self.content = []
        self.first = True
        self.firstParagraph = True
        self.list_level = 0
        self.section_level = 0
        self.list_counter = -1
        self.list_prefix = [[]]
        self.lineBeginsWithListIndicator = False
        self.addedNewline = False
        self.table = False
        self.table_header = False
        self.element_level = 0
        self.quote_level = 0
        self.figure = False
        self.figureImage = False
        self.lastGalleryClass = ""
        self.inTitle = False
        self.openFootnotes = 0
        # Block all output
        self.block = False
        self.footnote = False
        self.field_body = False
        for method in self.empty_methods:
            setattr(self, method, lambda n: None)

    # ----------------------------------------------------
    # low-level output helpers

    def _add(self, string):
        """Append a fragment to the output unless output is blocked."""
        if not self.block:
            self.addedNewline = False
            self.content.append(string)

    def _indent(self):
        """Emit indentation proportional to the current list depth."""
        self._add(" " * self.list_level * 2)

    def _newline(self, number=1):
        """Emit ``number`` newlines, tracking table-cell div workaround."""
        if self.addedNewline and self.table:
            # Multi-line table cells need a {div} wrapper; see depart_entry.
            self.content[self.lastTableEntryBar] += "{div}"
            self.tableEntryDiv = True
        self._add("\n" * number)
        self.addedNewline = True
        self.lineBeginsWithListIndicator = False

    def _remove_last_newline(self):
        if self.addedNewline:
            self.content.pop(len(self.content) - 1)
            self.addedNewline = False

    def astext(self):
        """Return the complete rendered wiki text."""
        return "".join(self.content)

    def unknown_visit(self, node):
        raise Exception(
            "Unknown visit on line %s: %s." % (node.line, repr(node)))

    def unknown_departure(self, node):
        raise Exception(
            "Unknown departure on line %s: %s." % (node.line, repr(node)))

    # ----------------------------------------------------
    # paragraphs and inline text

    def visit_paragraph(self, node):
        if self.firstParagraph and self.generateExcerpt\
                and self.element_level == 0:
            self._add('{excerpt}')
        self.element_level += 1
        if not self.first and not self.footnote and not self.field_body\
                and self.list_level == 0:
            self._newline()
        if self.list_level > 0 and not self.lineBeginsWithListIndicator:
            self._add(" " * (self.list_level + (self.list_level > 0)))

    def depart_paragraph(self, node):
        if self.firstParagraph and self.element_level == 1:
            if self.generateExcerpt:
                self._add('{excerpt}')
            self.firstParagraph = False
        if not self.footnote and not isinstance(node.parent, nodes.field_body):
            self._newline()
        self.element_level -= 1
        self.first = False

    def visit_Text(self, node):
        string = node.astext()
        if not self.inCode:
            # Escape "[" so Confluence does not interpret it as a link.
            # NOTE(review): "{" and "}" were previously "replaced" with
            # themselves (a no-op, removed here).  Confluence treats "{"
            # as a macro delimiter -- confirm whether real escaping is
            # wanted before changing output.
            string = string.replace("[", "\\[")
        if self.keepLineBreaks:
            self._add(string)
        else:
            # rst line break should be removed.
            self._add(" ".join(string.split('\n')))

    def visit_emphasis(self, node):
        self.element_level += 1
        self._add("_")

    def depart_emphasis(self, node):
        self._add("_")
        self.element_level -= 1

    def visit_strong(self, node):
        self.element_level += 1
        self._add_space_when_needed()
        self._add("*")

    def depart_strong(self, node):
        self._add("*")
        self.element_level -= 1

    def visit_section(self, node):
        self.section_level += 1

    def depart_section(self, node):
        self.section_level -= 1

    # ----------------------------------------------------
    # targets and references

    def cflAnchorValue(self, name):
        """Normalize an anchor name to something Confluence accepts."""
        return name.replace("-", "")\
            .replace(" ", '')\
            .replace(u"ä", "a")\
            .replace(u"ö", "o")\
            .replace(u"ü", "u")\
            .replace(u"ß", "s")

    def visit_target(self, node):
        if 'refid' not in node:
            return
        self._add("{anchor:" + self.cflAnchorValue(node["refid"]) + "}")
        self._newline()

    def visit_reference(self, node):
        if 'refuri' in node:
            if node.children[0].astext() == node["refuri"]\
                    and "://" in node["refuri"]:
                # Bare external URL: emit as-is (Confluence auto-links it).
                if self.table and self._endswidth("|"):
                    self._add(" ")
                self._add(self.escapeUri(node.children[0].astext()))
            elif "://" in node["refuri"]:
                self._add("[")
                self._add(node.children[0].astext() + "|")
                self._add(self.escapeUri(node["refuri"]) + "]")
            else:
                self._add("[")
                self._add(node.children[0].astext() + "|")
                self._add(urllib.parse.unquote(node["refuri"]) + "]")
        else:
            assert 'refid' in node, \
                'References must have "refuri" or "refid" attribute.'
            self._add("[")
            self._add(node.children[0].astext() + "|")
            self._add("#" + self.cflAnchorValue(node["refid"]) + "]")
        raise nodes.SkipNode

    def escapeUri(self, uri):
        """Escape square brackets so the URI survives inside [link] markup."""
        return uri.replace("[", "\\[").replace("]", "\\]")

    def depart_reference(self, node):
        pass

    # ----------------------------------------------------
    # footnotes

    def visit_footnote_reference(self, node):
        self.openFootnotes += 1
        self._add("^")
        self._add(node.children[0].astext())
        self._add("^")
        raise nodes.SkipNode

    def depart_footnote_reference(self, node):
        pass

    def visit_footnote(self, node):
        self.openFootnotes -= 1
        self.footnote = True
        self._newline()
        self._add("bq. ")

    def depart_footnote(self, node):
        self.footnote = False
        if self.openFootnotes == 0:
            self._newline()

    def visit_label(self, node):
        self._add("^")
        self._add(node.astext())
        self._add("^ ")
        raise nodes.SkipNode

    def depart_label(self, node):
        pass

    # ----------------------------------------------------
    # literals

    def visit_literal_block(self, node):
        self.element_level += 1
        self.keepLineBreaks = True
        self.inCode = True
        self._add('{code}')
        self._newline()

    def depart_literal_block(self, node):
        self.keepLineBreaks = False
        self.inCode = False
        self._newline()
        self._add('{code}')
        self._newline()
        self.element_level -= 1

    def visit_literal(self, node):
        self.element_level += 1
        self._add_space_when_needed()
        self._add('{{')

    def depart_literal(self, node):
        self._add('}}')
        self.element_level -= 1

    def visit_footer(self, node):
        pass

    # ----------------------------------------------------
    # title

    def visit_title(self, node):
        self.element_level += 1
        if self.section_level == 0:
            self.section_level = 1
        if not self.first:
            self._newline()
        self._add("h" + str(self.section_level) + ". ")
        self.inTitle = True

    def depart_title(self, node):
        # Emit an anchor named after the title so intra-page links work.
        anchorname = self.cflAnchorValue(node.astext()).lower()
        self._add('{anchor:' + anchorname + '}')
        self._newline(2)
        self.first = True
        self.inTitle = False
        self.element_level -= 1

    def visit_subtitle(self, node):
        self.element_level += 1
        self._add("h" + str(self.section_level) + ". ")

    def depart_subtitle(self, node):
        self._newline(2)
        self.element_level -= 1

    # ----------------------------------------------------
    # bullet list

    def visit_bullet_list(self, node):
        self.element_level += 1
        self.list_level += 1
        self.list_prefix[-1].append("*")

    def depart_bullet_list(self, node):
        self.list_level -= 1
        self.list_prefix[-1].pop()
        self.element_level -= 1

    def visit_list_item(self, node):
        self.element_level += 1
        self._add("".join(self.list_prefix[-1]) + " ")
        self.first = True
        self.lineBeginsWithListIndicator = True

    def depart_list_item(self, node):
        self.element_level -= 1

    # ----------------------------------------------------
    # enumerated list

    def visit_enumerated_list(self, node):
        self.element_level += 1
        self.list_prefix[-1].append("#")
        self.list_counter = 1
        self.list_level += 1

    def depart_enumerated_list(self, node):
        self.list_counter = -1
        self.list_level -= 1
        self.list_prefix[-1].pop()
        self.element_level -= 1

    # ----------------------------------------------------
    # admonitions

    def visit_info(self, node):
        self.element_level += 1
        self._add("{info}")
        self.do_visit_admonition()

    def depart_info(self, node):
        self._add("{info}")
        self.do_depart_admonition()
        # Fix: was the no-op expression "-self.element_level", which never
        # decremented the counter and left element_level permanently
        # inflated after every info admonition.
        self.element_level -= 1

    def visit_note(self, node):
        self.element_level += 1
        self._add("{note}")
        self.do_visit_admonition()

    def depart_note(self, node):
        self._add("{note}")
        self.do_depart_admonition()
        self.element_level -= 1

    def visit_tip(self, node):
        self.element_level += 1
        self._add("{tip}")
        self.do_visit_admonition()

    def depart_tip(self, node):
        self._add("{tip}")
        self.do_depart_admonition()
        self.element_level -= 1

    # ----------------------------------------------------
    # docinfo / metadata

    def visit_docinfo(self, node):
        self.element_level += 1
        # Docinfo fields are rendered as a two-column table.
        self.table = True
        self.docinfo = True

    def depart_docinfo(self, node):
        self.table = False
        self.docinfo = False
        self._newline(2)
        self.element_level -= 1

    def visit_meta(self, node):
        self.element_level += 1
        name = node.get('name')
        content = node.get('content')
        self.meta[name] = content

    def depart_meta(self, node):
        self.element_level -= 1

    def _docinfo_field(self, node):
        # non-standard docinfo field, becomes a generic field element.
        # and render as normal table fields.
        if self.docinfo:
            self._add("||%s|" % node.__class__.__name__)
            self.visit_field_body(node)

    def visit_author(self, node):
        return self._docinfo_field(node)

    def depart_author(self, node):
        if self.docinfo:
            self.depart_field_body(node)

    def visit_contact(self, node):
        return self._docinfo_field(node)

    def depart_contact(self, node):
        if self.docinfo:
            self.depart_field_body(node)

    def visit_date(self, node):
        return self._docinfo_field(node)

    def depart_date(self, node):
        if self.docinfo:
            self.depart_field_body(node)

    def visit_status(self, node):
        return self._docinfo_field(node)

    def depart_status(self, node):
        if self.docinfo:
            self.depart_field_body(node)

    def visit_version(self, node):
        return self._docinfo_field(node)

    def depart_version(self, node):
        if self.docinfo:
            self.depart_field_body(node)

    def visit_revision(self, node):
        return self._docinfo_field(node)

    def depart_revision(self, node):
        if self.docinfo:
            self.depart_field_body(node)

    def visit_inline(self, node):
        pass

    def depart_inline(self, node):
        pass

    def visit_warning(self, node):
        self.element_level += 1
        self._add("{warning}")
        self.do_visit_admonition()

    def depart_warning(self, node):
        self._add("{warning}")
        self.do_depart_admonition()
        self.element_level -= 1

    # admonition helpers
    def do_visit_admonition(self):
        self.element_level += 1
        # Nested lists inside admonitions get a fresh prefix stack.
        self.list_prefix.append([])

    def do_depart_admonition(self):
        self.list_prefix.pop()
        if self.list_level == 0:
            self._newline(2)
        else:
            self._newline()
        self.element_level -= 1

    # ----------------------------------------------------
    # image

    def visit_image(self, node):
        self.element_level += 1
        if 'classes' in node:
            for classval in node['classes']:
                if classval.startswith("gallery-"):
                    self._print_image_gallery(node, classval)
                    return
        if self.figure:
            # Inside a figure the image is emitted later, in depart_figure,
            # once caption/attributes have been merged in.
            self.figureImage = node
        else:
            self._print_image(node)

    def depart_image(self, node):
        self.element_level -= 1

    def _print_image(self, node):
        """Emit a single image in !uri|attr,attr! notation."""
        uri = node['uri']
        atts = {}
        if 'alt' in node:
            atts['alt'] = node['alt']
        if 'title' in node:
            atts['title'] = node['title']
        if 'width' in node:
            atts['width'] = node['width']
        if 'height' in node:
            atts['height'] = node['height']
        if 'scale' in node:
            # confluence has no percentages, so we simply make thumbnail
            atts['thumbnail'] = True
        if 'align' in node:
            atts['align'] = node['align']
        attributes = []
        for att in atts:
            if atts[att] is True:
                attributes.append(att)
            else:
                attributes.append(att + "=" + atts[att])
        self._add("!")
        self._add(uri)
        if attributes:
            self._add("|")
            self._add(",".join(attributes))
        self._add("!")
        self._newline()

    def _print_image_gallery(self, node, galleryclass):
        """Merge consecutive same-class gallery images into one {gallery}."""
        uri = node['uri']
        if galleryclass == self.lastGalleryClass\
                and self.content[-1].startswith("{gallery:"):
            # Append this uri to the include list of the previous macro.
            self.content[-1] = self.content[-1][0:-2] + "," + uri + "}\n"
        else:
            self.lastGalleryClass = galleryclass
            self._add("{gallery:include=" + uri + "}\n")

    # ----------------------------------------------------
    # figure

    def visit_figure(self, node):
        self.element_level += 1
        self.figure = True

    def depart_figure(self, node):
        if not self.figureImage:
            # happens in gallery mode
            return
        # Copy the figure's own attributes onto the image node before
        # rendering it.
        foo = vars(node)['attributes']
        for att in foo:
            self.figureImage[att] = foo[att]
        self.figure = False
        self._print_image(self.figureImage)
        self.figureImage = None
        self.element_level -= 1

    def visit_caption(self, node):
        self.figureImage['title'] = node.children[0]
        raise nodes.SkipNode

    # ----------------------------------------------------
    # table

    def visit_table(self, node):
        self.element_level += 1
        self.table = True

    def depart_table(self, node):
        self.table = False
        self._newline()
        self.element_level -= 1

    def visit_thead(self, node):
        self.element_level += 1
        self.table_header = True

    def depart_thead(self, node):
        self.table_header = False
        self.element_level -= 1

    def visit_tbody(self, node):
        self.element_level += 1

    def depart_tbody(self, node):
        self.element_level -= 1

    def visit_row(self, node):
        self.element_level += 1

    def depart_row(self, node):
        # Close the row with the trailing cell separator.
        if self.table_header:
            self._add("||")
        else:
            self._add("|")
        self._newline()
        self.element_level -= 1

    def visit_entry(self, node):
        self.element_level += 1
        if not self.table:
            return
        self.first = True
        self.tableEntryDiv = False
        if self.table_header:
            self._add("||")
        else:
            self._add("|")
        # Remember where this cell's separator sits so _newline can patch
        # in a {div} opener for multi-line cells.
        self.lastTableEntryBar = len(self.content) - 1

    def depart_entry(self, node):
        if not self.table:
            return
        self._remove_last_newline()
        self.first = False
        if self.tableEntryDiv:
            # work around bug in confluence
            # https://jira.atlassian.com/browse/CONF-9785
            self._add("{div}")
        self.element_level -= 1

    # ----------------------------------------------------
    # Definition list
    # Confluence wiki does not support definition lists; each term is
    # converted to an h6 section instead.

    def visit_definition_list(self, node):
        pass

    def depart_definition_list(self, node):
        pass

    def visit_definition_list_item(self, node):
        self.has_classifier = False

    def depart_definition_list_item(self, node):
        pass

    def visit_term(self, node):
        self.element_level += 1
        self._add("h6. ")

    def depart_term(self, node):
        self._newline()
        self.element_level -= 1

    def visit_classifier(self, node):
        if self.has_classifier:
            self._add(", ")
        self._add("_")
        self.has_classifier = True

    def depart_classifier(self, node):
        self._add("_")

    def visit_definition(self, node):
        self.element_level += 1
        if self.has_classifier:
            self._newline()
        self.first = True

    def depart_definition(self, node):
        self._newline()
        self.element_level -= 1

    # ----------------------------------------------------
    # block quote

    def visit_block_quote(self, node):
        self.element_level += 1
        # Only the outermost quote level emits the {quote} markers.
        if self.quote_level == 0:
            self._add("{quote}")
        self.quote_level += 1

    def depart_block_quote(self, node):
        if self.quote_level == 1:
            self._add("{quote}")
            self._newline()
        self.quote_level -= 1
        self.element_level -= 1

    def invisible_visit(self, node):
        """Invisible nodes should be ignored."""
        raise nodes.SkipNode

    visit_comment = invisible_visit

    def visit_topic(self, node):
        self._add("{toc}")
        self._newline(2)
        raise nodes.SkipNode

    def visit_system_message(self, node):
        self.element_level += 1
        self._add(
            "{warning:title="
            + "System Message: %s/%s" % (node['type'], node['level'])
            + "}")
        self._newline()
        self._add('{{' + node['source'] + "}}#" + str(node['line']))

    def depart_system_message(self, node):
        self._add("{warning}")
        self.element_level -= 1

    # ----------------------------------------------------
    # field lists

    def visit_field_list(self, node):
        self.element_level += 1
        self._newline()

    def depart_field_list(self, node):
        self._newline()
        self.element_level -= 1

    def visit_field_name(self, node):
        self.element_level += 1
        self._add("||")

    def depart_field_name(self, node):
        # Fix: was "+= 1", which incremented the level a second time on
        # departure and left element_level inflated after every field name.
        self.element_level -= 1

    def visit_field_body(self, node):
        self.element_level += 1
        self.field_body = True
        self._add("|")

    def depart_field_body(self, node):
        self.field_body = False
        self._add("|")
        self._newline()
        self.element_level -= 1

    # ----------------------------------------------------
    # line blocks

    def visit_line_block(self, node):
        if not self.field_body:
            self._newline()

    def depart_line(self, node):
        self._newline()

    # roles http://docutils.sourceforge.net/docs/ref/rst/roles.html
    def visit_title_reference(self, node):
        self._add("_")

    def depart_title_reference(self, node):
        self._add("_")

    # ----------------------------------------------------
    # small string helpers

    def _add_space_when_needed(self):
        """Ensure the output does not butt markup against previous text."""
        if len(self.content) == 0:
            return
        lastline = self.content[len(self.content) - 1]
        if not lastline.endswith(" ") and not lastline.endswith("\n"):
            self._add(" ")

    def _endswidth(self, string):
        # NOTE(review): name keeps its historical typo ("endswidth") since
        # it is part of the class's public surface.
        lastline = self.content[len(self.content) - 1]
        return lastline.endswith(string)

    # substitution definitions
    def visit_substitution_definition(self, node):
        raise nodes.SkipNode
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.